FD.io VPP v21.06-3-gbb25fbf28
Vector Packet Processing
22 typedef enum _tcp_output_next
31 #define foreach_tcp4_output_next \
32 _ (DROP, "error-drop") \
33 _ (IP_LOOKUP, "ip4-lookup") \
34 _ (IP_REWRITE, "ip4-rewrite") \
37 #define foreach_tcp6_output_next \
38 _ (DROP, "error-drop") \
39 _ (IP_LOOKUP, "ip6-lookup") \
40 _ (IP_REWRITE, "ip6-rewrite") \
41 _ (IP_ARP, "ip6-discover-neighbor")
44 #define tcp_error(n,s) s,
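The foreach_*_output_next lists above and the tcp_error (n, s) macro are consumed with the usual VPP X-macro idiom: each _ (SYMBOL, "node-name") pair is expanded once to build an enum and once to build the string tables used by the node registrations further down the file. A condensed, self-contained sketch of the idiom (the demo_* names are illustrative, not taken from this file):

/* Self-contained sketch of the X-macro idiom; demo_* names are hypothetical. */
#define foreach_demo_next   \
  _ (DROP, "error-drop")    \
  _ (IP_LOOKUP, "ip6-lookup")

typedef enum
{
#define _(sym, str) DEMO_NEXT_##sym,
  foreach_demo_next
#undef _
    DEMO_N_NEXT,
} demo_next_t;

/* The same list expanded a second time to name the next nodes, exactly as the
 * "#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n," lines do near the node
 * registrations below. */
static char *demo_next_nodes[DEMO_N_NEXT] = {
#define _(sym, str) [DEMO_NEXT_##sym] = str,
  foreach_demo_next
#undef _
};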
71 #ifndef CLIB_MARCH_VARIANT
107 if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
118 u32 available_space, wnd;
130 observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);
137 TCP_EVT (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
158 if (state < TCP_STATE_ESTABLISHED)
162 return tc->rcv_wnd >> tc->rcv_wscale;
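Line 162 above is the RFC 1323 window-scale step: only the top bits of the receive window fit in the 16-bit header field, so the value is right-shifted by the negotiated scale and the peer shifts it back. A minimal standalone illustration (plain C, not the VPP helper):

#include <stdint.h>

/* RFC 1323 scaling: the header field carries rcv_wnd >> rcv_wscale. */
static inline uint16_t
wnd_to_advertise (uint32_t rcv_wnd, uint8_t rcv_wscale)
{
  return (uint16_t) (rcv_wnd >> rcv_wscale);
}

/* Example: rcv_wnd = 262144, rcv_wscale = 3 -> header field 32768;
 * the peer reconstructs 32768 << 3 = 262144. Up to (1 << rcv_wscale) - 1
 * bytes of precision are lost in the shift. */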
170 opts->flags |= TCP_OPTS_FLAG_MSS;
174 opts->flags |= TCP_OPTS_FLAG_WSCALE;
175 opts->wscale = tc->rcv_wscale;
178 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
185 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
199 opts->flags |= TCP_OPTS_FLAG_MSS;
205 opts->flags |= TCP_OPTS_FLAG_WSCALE;
206 opts->wscale = tc->rcv_wscale;
212 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
214 opts->tsecr = tc->tsval_recent;
220 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
238 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
240 opts->tsecr = tc->tsval_recent;
247 opts->flags |= TCP_OPTS_FLAG_SACK;
248 if (tc->snd_sack_pos >= vec_len (tc->snd_sacks))
249 tc->snd_sack_pos = 0;
250 opts->sacks = &tc->snd_sacks[tc->snd_sack_pos];
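The fragments above only set flags in a tcp_options_t; the byte layout is produced later by tcp_options_write (see the symbol list at the end). As a reference for what ends up on the wire, here is a standalone sketch of the MSS and window-scale options per RFC 793/1323; it is illustrative and not VPP's implementation:

#include <arpa/inet.h>  /* htons */
#include <stdint.h>
#include <string.h>

/* Write kind-2 (MSS) and kind-3 (window scale) options, NOP-padded to a
 * 4-byte boundary; returns the number of bytes written (8). */
static uint32_t
write_mss_wscale (uint8_t *data, uint16_t mss, uint8_t wscale)
{
  uint8_t *p = data;
  uint16_t mss_net = htons (mss);

  *p++ = 2;                    /* kind: MSS, TCP_OPTION_LEN_MSS = 4 */
  *p++ = 4;
  memcpy (p, &mss_net, sizeof (mss_net));
  p += sizeof (mss_net);

  *p++ = 3;                    /* kind: window scale, length 3 */
  *p++ = 3;
  *p++ = wscale;

  *p++ = 1;                    /* NOP pad */
  return (uint32_t) (p - data);
}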
270 case TCP_STATE_ESTABLISHED:
271 case TCP_STATE_CLOSE_WAIT:
272 case TCP_STATE_FIN_WAIT_1:
273 case TCP_STATE_LAST_ACK:
274 case TCP_STATE_CLOSING:
275 case TCP_STATE_FIN_WAIT_2:
276 case TCP_STATE_TIME_WAIT:
277 case TCP_STATE_CLOSED:
279 case TCP_STATE_SYN_RCVD:
281 case TCP_STATE_SYN_SENT:
307 TCP_STATE_ESTABLISHED);
310 tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len;
318 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
321 if (tc->snd_una == tc->snd_nxt)
327 if (tc->flags & TCP_CONN_PSH_PENDING)
331 tc->psh_seq = tc->snd_una + max_deq - 1;
340 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
343 b->flags &= VLIB_BUFFER_NEXT_PRESENT - 1;
353 #ifndef CLIB_MARCH_VARIANT
358 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
370 ip46_address_t * src, ip46_address_t * dst)
373 u16 payload_length_host_byte_order;
378 clib_host_to_net_u16 (IP_PROTOCOL_TCP);
390 payload_length_host_byte_order, NULL, 0,
396 ip46_address_t * src, ip46_address_t * dst)
399 u32 payload_length_host_byte_order;
403 clib_host_to_net_u32 (payload_length_host_byte_order +
404 (IP_PROTOCOL_TCP << 16));
410 payload_length_host_byte_order, NULL, 0,
418 if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
425 (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
428 (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
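Lines 418-428 pick a software checksum path (ip4_tcp_compute_checksum_custom or ip6_tcp_compute_checksum_custom) only when checksum offload is disabled for the connection. Both reduce to the standard Internet one's-complement sum seeded with the pseudo-header; a generic standalone version for reference (not the VPP code):

#include <stddef.h>
#include <stdint.h>

/* 16-bit one's-complement (Internet) checksum. 'sum' lets a caller seed
 * with the pseudo-header (addresses, protocol, TCP length) before summing
 * the TCP header and payload. */
static uint16_t
ones_complement_csum (const uint8_t *data, size_t len, uint32_t sum)
{
  for (size_t i = 0; i + 1 < len; i += 2)
    sum += (uint32_t) ((data[i] << 8) | data[i + 1]);
  if (len & 1)
    sum += (uint32_t) (data[len - 1] << 8);
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}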
445 u8 tcp_opts_len, tcp_hdr_opts_len;
453 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
456 tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd);
480 TCP_EVT (TCP_EVT_ACK_SENT, tc);
481 tc->rcv_las = tc->rcv_nxt;
499 u8 tcp_hdr_opts_len, tcp_opts_len;
509 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
526 u8 tcp_opts_len, tcp_hdr_opts_len;
533 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
536 tc->rcv_nxt, tcp_hdr_opts_len,
546 u8 is_ip4, u32 fib_index)
551 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
558 tm->ipl_next_node[!is_ip4]);
568 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
572 wrk->tco_next_node[!is_ip4]);
584 ip6_address_t src_ip6, dst_ip6;
620 seq = th->ack_number;
626 tmp = clib_net_to_host_u32 (th->seq_number);
628 ack = clib_host_to_net_u32 (tmp + len);
653 #ifndef CLIB_MARCH_VARIANT
703 seq = pkt_th->ack_number;
704 ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
710 ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end);
714 seq, ack, tcp_hdr_len, flags, 0);
733 tc->ipv6_flow_label);
739 TCP_EVT (TCP_EVT_RST_SENT, tc);
741 TCP_ERROR_RST_SENT, 1);
755 u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
767 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
768 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
771 tc->rcv_nxt, tcp_hdr_opts_len, flags,
775 ASSERT (opts_write_len == tc->snd_opts_len);
778 TCP_EVT (TCP_EVT_RST_SENT, tc);
780 TCP_ERROR_RST_SENT, 1);
795 IP_PROTOCOL_TCP, tc->ipv6_flow_label);
834 tc->rtt_seq = tc->snd_nxt;
839 TCP_EVT (TCP_EVT_SYN_SENT, tc);
850 ASSERT (tc->snd_una != tc->snd_nxt);
865 TCP_EVT (TCP_EVT_SYNACK_SENT, tc);
880 fin_snt = tc->flags & TCP_CONN_FINSNT;
892 tc->flags |= TCP_CONN_FINSNT;
898 if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks)
899 tc->flags &= ~TCP_CONN_SNDACK;
905 TCP_EVT (TCP_EVT_FIN_SENT, tc);
911 tc->flags |= TCP_CONN_FINSNT;
912 tc->flags &= ~TCP_CONN_FINPNDG;
922 u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
939 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
942 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
948 if (seq_geq (tc->psh_seq, snd_nxt)
950 flags |= TCP_FLAG_PSH;
953 tc->rcv_nxt, tcp_hdr_opts_len, flags,
959 tm->wrk_ctx[tc->c_thread_index].cached_opts,
974 tc->rcv_las = tc->rcv_nxt;
977 tc->data_segs_out += 1;
998 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1009 tc->rtt_seq = tc->snd_nxt;
1043 if (!(tc->flags & TCP_CONN_SNDACK))
1046 tc->flags |= TCP_CONN_SNDACK;
1053 if (!(tc->flags & TCP_CONN_SNDACK))
1056 tc->flags |= TCP_CONN_SNDACK;
1058 if (tc->pending_dupacks < 255)
1059 tc->pending_dupacks += 1;
1065 if (!(tc->flags & TCP_CONN_RXT_PENDING))
1068 tc->flags |= TCP_CONN_RXT_PENDING;
1085 if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss)
1146 u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
1147 u16 n_peeked, len_to_deq;
1152 n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
1170 b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
1175 for (i = 1; i < n_bufs_per_seg; i++)
1178 len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
1179 chain_bi = wrk->tx_buffers[--n_bufs];
1186 ASSERT (n_peeked == len_to_deq);
1193 prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
1195 max_deq_bytes -= n_peeked;
1210 ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);
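tcp_prepare_segment above splits a segment across buffers: line 1152 computes how many buffers a segment needs, and the loop at 1175-1193 peeks the fifo in bytes_per_buffer-sized chunks and links the buffers via next_buffer / VLIB_BUFFER_NEXT_PRESENT. The sizing arithmetic, as an integer-only sketch (illustrative):

#include <stdint.h>

/* ceil (seg_size / bytes_per_buffer) without going through floating point. */
static uint32_t
bufs_per_seg (uint32_t seg_size, uint32_t bytes_per_buffer)
{
  return (seg_size + bytes_per_buffer - 1) / bytes_per_buffer;
}

/* Example: a 4500-byte segment with 2048-byte buffers needs 3 buffers;
 * the first also carries the TCP header, the rest are chained to it. */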
1226 u32 start, available_bytes;
1229 ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
1230 ASSERT (max_deq_bytes != 0);
1237 available_bytes -= offset;
1238 if (!available_bytes)
1241 max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
1242 max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
1244 start = tc->snd_una + offset;
1253 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1257 tc->segs_retrans += 1;
1271 if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
1283 TCP_EVT (TCP_EVT_CC_EVT, tc, 6);
1285 tc->prev_ssthresh = tc->ssthresh;
1286 tc->prev_cwnd = tc->cwnd;
1297 tc->cwnd_acc_bytes = 0;
1298 tc->tr_occurences += 1;
1318 if (tc->state == TCP_STATE_CLOSED)
1321 if (tc->state >= TCP_STATE_ESTABLISHED)
1323 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1326 if (tc->flags & TCP_CONN_FINSNT)
1335 if (tc->snd_una == tc->snd_nxt)
1369 tc->snd_congestion = tc->snd_nxt;
1388 if (tc->rto_boff == 1)
1401 else if (tc->state == TCP_STATE_SYN_RCVD)
1403 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1428 ASSERT (tc->snd_una != tc->snd_nxt);
1434 TCP_EVT (TCP_EVT_SYN_RXT, tc, 1);
1441 ASSERT (tc->state == TCP_STATE_CLOSED);
1464 if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
1467 TCP_DBG ("could not remove half-open connection");
1471 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1499 TCP_EVT (TCP_EVT_SYN_RXT, tc, 0);
1517 u32 bi, max_snd_bytes, available_bytes, offset;
1525 if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
1526 || (tc->flags & TCP_CONN_FINSNT))
1527 goto update_scheduler;
1530 offset = tc->snd_nxt - tc->snd_una;
1534 if (!available_bytes)
1540 if (available_bytes <= offset)
1541 goto update_scheduler;
1562 max_snd_bytes = clib_min (tc->snd_mss,
1568 || tc->snd_una == tc->snd_nxt
1569 || tc->rto_boff > 1));
1571 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1603 TCP_EVT (TCP_EVT_CC_EVT, tc, 1);
1619 u32 offset, n_segs = 0, n_written, bi, available_wnd;
1623 offset = tc->snd_nxt - tc->snd_una;
1624 available_wnd = tc->snd_wnd - offset;
1625 burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);
1627 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1630 while (n_segs < burst_size)
1641 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1644 tc->snd_nxt += n_written;
1661 prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion);
1663 if (pipe > tc->ssthresh)
1665 space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / tc->prev_cwnd))
1671 limit = clib_max ((int) (tc->prr_delivered - prr_out), 0) + tc->snd_mss;
1682 u32 tx_adv_sack = sb->high_sacked - tc->snd_congestion;
1683 f64 rr = (f64) tc->ssthresh / tc->prev_cwnd;
1688 return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr);
1695 - (tc->snd_nxt - tc->snd_una));
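Lines 1661-1671 implement RFC 6937 proportional rate reduction: while pipe is above ssthresh, send space is a fraction (ssthresh over the cwnd at the time of loss) of the data newly delivered; otherwise growth is capped by a conservative limit of roughly one MSS beyond what was delivered but not yet sent out. A simplified sketch under those assumptions (names follow the RFC, not VPP):

#include <stdint.h>

static int
prr_snd_space (uint64_t prr_delivered, uint64_t prr_out, uint32_t pipe,
               uint32_t ssthresh, uint32_t cwnd_at_loss, uint32_t mss)
{
  int space;

  if (pipe > ssthresh)
    {
      /* Proportional phase: pace out ssthresh/cwnd_at_loss of deliveries. */
      space = (int) (prr_delivered * ((double) ssthresh / cwnd_at_loss))
              - (int) prr_out;
    }
  else
    {
      /* Reduction-bound phase: allow at most ~one MSS of new headroom. */
      int limit = (int) (prr_delivered > prr_out ?
                         prr_delivered - prr_out : 0) + (int) mss;
      space = (int) ssthresh - (int) pipe;
      if (space > limit)
        space = limit;
    }
  return space > 0 ? space : 0;
}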
1698 #define scoreboard_rescue_rxt_valid(_sb, _tc) \
1699 (seq_geq (_sb->rescue_rxt, _tc->snd_una) \
1700 && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
1709 u32 n_written = 0, offset, max_bytes, n_segs = 0;
1710 u8 snd_limited = 0, can_rescue = 0;
1711 u32 bi, max_deq, burst_bytes;
1721 burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
1733 if (snd_space < tc->snd_mss)
1740 && seq_gt (sb->high_sacked, tc->snd_congestion)
1741 && tc->rxt_head != tc->snd_una
1744 max_bytes = clib_min (tc->snd_mss, tc->snd_congestion - tc->snd_una);
1755 tc->rxt_head = tc->snd_una;
1756 tc->rxt_delivered += n_written;
1757 tc->prr_delivered += n_written;
1758 ASSERT (tc->rxt_delivered <= tc->snd_rxt_bytes);
1763 TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
1767 max_deq -= tc->snd_nxt - tc->snd_una;
1769 while (snd_space > 0 && n_segs < burst_size)
1776 if (max_deq > tc->snd_mss)
1783 av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
1784 av_wnd = clib_max (av_wnd - tc->snd_mss, 0);
1785 snd_space = clib_min (snd_space, av_wnd);
1786 snd_space = clib_min (max_deq, snd_space);
1787 burst_size = clib_min (burst_size - n_segs,
1788 snd_space / tc->snd_mss);
1791 if (max_deq > n_segs_new * tc->snd_mss)
1794 n_segs += n_segs_new;
1808 max_bytes = clib_min (tc->snd_mss, hole->end - hole->start);
1809 max_bytes = clib_min (max_bytes, snd_space);
1810 offset = hole->end - tc->snd_una - max_bytes;
1816 sb->rescue_rxt = tc->snd_congestion;
1823 max_bytes = clib_min (hole->end - sb->high_rxt, snd_space);
1824 max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
1828 offset = sb->high_rxt - tc->snd_una;
1831 ASSERT (n_written <= snd_space);
1840 sb->high_rxt += n_written;
1843 snd_space -= n_written;
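Lines 1808-1840 size each retransmission from the scoreboard: the amount taken from a hole is clamped to one MSS and to the remaining send space, the fifo offset is computed relative to snd_una, and sb->high_rxt advances by what was actually written. A simplified standalone sketch (it retransmits from the start of the hole; the branches above also handle high_rxt continuation and rescue retransmits):

#include <stdint.h>

typedef struct { uint32_t start, end; } demo_hole_t;  /* hypothetical */

/* Returns bytes to retransmit for this hole and sets *offset to the
 * position of those bytes in the send fifo (relative to snd_una). */
static uint32_t
hole_rxt_bytes (const demo_hole_t *hole, uint32_t snd_mss,
                uint32_t snd_space, uint32_t snd_una, uint32_t *offset)
{
  uint32_t max_bytes = hole->end - hole->start;   /* bytes missing at peer */

  if (max_bytes > snd_mss)
    max_bytes = snd_mss;                          /* one segment at a time */
  if (max_bytes > snd_space)
    max_bytes = snd_space;                        /* respect send space */

  *offset = hole->start - snd_una;
  return max_bytes;
}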
1863 u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now, max_bytes;
1864 u32 burst_bytes, sent_bytes;
1866 int snd_space, n_segs = 0;
1871 TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
1874 burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
1882 cc_limited = snd_space < burst_bytes;
1889 while (snd_space > 0 && n_segs < burst_size)
1892 tc->snd_congestion - tc->snd_una - offset);
1904 snd_space -= n_written;
1909 if (n_segs == burst_size)
1915 if (snd_space < tc->snd_mss || tc->snd_mss == 0)
1919 max_deq -= tc->snd_nxt - tc->snd_una;
1922 snd_space = clib_min (max_deq, snd_space);
1923 burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
1925 if (n_segs_now && max_deq > n_segs_now * tc->snd_mss)
1927 n_segs += n_segs_now;
1933 sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
1934 sent_bytes = cc_limited ? burst_bytes : sent_bytes;
1945 if (!tc->pending_dupacks)
1948 || tc->state != TCP_STATE_ESTABLISHED)
1961 tc->dupacks_out += 1;
1962 tc->pending_dupacks = 0;
1967 tc->snd_sack_pos = 0;
1973 n_acks = clib_min (n_acks, tc->pending_dupacks);
1975 for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
1978 if (n_acks < max_burst_size)
1980 tc->pending_dupacks = 0;
1981 tc->snd_sack_pos = 0;
1982 tc->dupacks_out += n_acks;
1987 TCP_DBG ("constrained by burst size");
1988 tc->pending_dupacks = n_acks - max_burst_size;
1989 tc->dupacks_out += max_burst_size;
1991 return max_burst_size;
2022 tc->flags &= ~TCP_CONN_RXT_PENDING;
2026 if (!(tc->flags & TCP_CONN_SNDACK))
2029 tc->flags &= ~TCP_CONN_SNDACK;
2032 if (n_segs && !tc->pending_dupacks)
2049 u16 * next0, u32 * error0)
2062 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2074 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2081 u32 * to_next, u32 n_bufs)
2089 for (i = 0; i < n_bufs; i++)
2092 if (!(b->flags & VLIB_BUFFER_IS_TRACED))
2116 IP_PROTOCOL_TCP, tc0->ipv6_flow_label);
2134 ASSERT ((b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID) != 0);
2135 ASSERT ((b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID) != 0);
2136 b->flags |= VNET_BUFFER_F_GSO;
2149 if (tc0->next_node_index)
2151 *next0 = tc0->next_node_index;
2152 vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
2236 b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2247 b[1]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2277 b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2288 TCP_ERROR_PKTS_SENT, frame->n_vectors);
2289 return frame->n_vectors;
2307 .name = "tcp4-output",
2309 .vector_size = sizeof (u32),
2315 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2327 .name = "tcp6-output",
2329 .vector_size = sizeof (u32),
2335 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2344 typedef enum _tcp_reset_next
2351 #define foreach_tcp4_reset_next \
2352 _(DROP, "error-drop") \
2353 _(IP_LOOKUP, "ip4-lookup")
2355 #define foreach_tcp6_reset_next \
2356 _(DROP, "error-drop") \
2357 _(IP_LOOKUP, "ip6-lookup")
2389 n_left_to_next -= 1;
2398 b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
2412 n_left_to_next, bi0, next0);
2433 .name = "tcp4-reset",
2434 .vector_size = sizeof (u32),
2439 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2449 .name = "tcp6-reset",
2450 .vector_size = sizeof (u32),
2455 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
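The .name / .vector_size fragments at 2307-2455 belong to VLIB_REGISTER_NODE blocks; the "#define _(s,n) [TCP_..._NEXT_##s] = n," lines fill their .next_nodes arrays from the foreach lists at the top of the file. A sketch of the shape of such a registration (fields other than .name, .vector_size and .next_nodes, and the TCP_OUTPUT_N_NEXT count, follow the usual VLIB pattern rather than text copied from this file):

VLIB_REGISTER_NODE (tcp4_output_node) =
{
  .name = "tcp4-output",
  .vector_size = sizeof (u32),
  .n_next_nodes = TCP_OUTPUT_N_NEXT,      /* assumed enum terminator name */
  .next_nodes = {
#define _(s, n) [TCP_OUTPUT_NEXT_##s] = n,
      foreach_tcp4_output_next
#undef _
  },
};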
u32 next_buffer
Next buffer for this linked-list of buffers.
vlib_node_registration_t tcp4_reset_node
(constructor) VLIB_REGISTER_NODE (tcp4_reset_node)
static uword ip6_address_is_link_local_unicast(const ip6_address_t *a)
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
u32 tsval
Timestamp value.
static u32 vlib_num_workers()
void tcp_connection_cleanup(tcp_connection_t *tc)
Cleans up connection state.
vlib_node_registration_t tcp6_output_node
(constructor) VLIB_REGISTER_NODE (tcp6_output_node)
#define seq_geq(_s1, _s2)
@ IP_LOOKUP_NEXT_ARP
This packet matches an "incomplete adjacency" and packets need to be passed to ARP to find rewrite st...
vlib_buffer_t * bufs[VLIB_FRAME_SIZE]
static void tcp_check_sack_reneging(tcp_connection_t *tc)
#define tcp_opts_tstamp(_to)
vl_api_ip_port_and_mask_t dst_port
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
u8 wscale
Window scale advertised.
#define TCP_USE_SACKS
Disable only for testing.
static void tcp_enqueue_to_output(tcp_worker_ctx_t *wrk, vlib_buffer_t *b, u32 bi, u8 is_ip4)
void tcp_send_reset(tcp_connection_t *tc)
Build and set reset packet for connection.
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
enum _tcp_output_next tcp_output_next_t
u16 ip6_tcp_udp_icmp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip6_header_t *ip0, int *bogus_lengthp)
format_function_t format_tcp_state
nat44_ei_hairpin_src_next_t next_index
void tcp_make_syn(tcp_connection_t *tc, vlib_buffer_t *b)
Convert buffer to SYN.
#define TCP_OPTS_MAX_SACK_BLOCKS
static void tcp_cc_init_rxt_timeout(tcp_connection_t *tc)
Reset congestion control, switch cwnd to loss window and try again.
#define TCP_OPTION_LEN_WINDOW_SCALE
static u8 * format_tcp_tx_trace(u8 *s, va_list *args)
void session_transport_closing_notify(transport_connection_t *tc)
Notification from transport that connection is being closed.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static void tcp_retransmit_timer_set(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
#define ADJ_INDEX_INVALID
Invalid ADJ index - used when no adj is known likewise blazoned capitals INVALID speak volumes where ...
static u32 tcp_buffer_len(vlib_buffer_t *b)
static uword tcp46_send_reset_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, u8 is_ip4)
struct _tcp_header tcp_header_t
static void * vlib_buffer_push_ip6(vlib_main_t *vm, vlib_buffer_t *b, ip6_address_t *src, ip6_address_t *dst, int proto)
Push IPv6 header to buffer.
vlib_get_buffers(vm, from, b, n_left_from)
static sack_scoreboard_hole_t * scoreboard_get_hole(sack_scoreboard_t *sb, u32 index)
vlib_main_t vlib_node_runtime_t * node
static_always_inline void vnet_buffer_offload_flags_set(vlib_buffer_t *b, vnet_buffer_oflags_t oflags)
struct _tcp_connection tcp_connection_t
void tcp_timer_retransmit_syn_handler(tcp_connection_t *tc)
SYN retransmit timer handler.
#define TCP_DBG(_fmt, _args...)
#define tcp_zero_rwnd_sent_on(tc)
static void * vlib_buffer_push_ip4(vlib_main_t *vm, vlib_buffer_t *b, ip4_address_t *src, ip4_address_t *dst, int proto, u8 csum_offload)
Push IPv4 header to buffer.
int tcp_session_custom_tx(void *conn, transport_send_params_t *sp)
static tcp_main_t * vnet_get_tcp_main()
static void session_add_pending_tx_buffer(u32 thread_index, u32 bi, u32 next_node)
Add session node pending buffer with custom node.
#define tcp_csum_offload(tc)
static void tcp_make_synack(tcp_connection_t *tc, vlib_buffer_t *b)
Convert buffer to SYN-ACK.
static tcp_worker_ctx_t * tcp_get_worker(u32 thread_index)
#define TCP_DUPACK_THRESHOLD
sack_block_t * sacks
SACK blocks.
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
static void tcp_timer_update(tcp_timer_wheel_t *tw, tcp_connection_t *tc, u8 timer_id, u32 interval)
void transport_connection_reschedule(transport_connection_t *tc)
static u32 tcp_time_tstamp(u32 thread_index)
Time used to generate timestamps, not the timestamp.
vl_api_dhcp_client_state_t state
void session_transport_closed_notify(transport_connection_t *tc)
Notification from transport that it is closed.
void session_queue_run_on_main_thread(vlib_main_t *vm)
static void tcp_cc_congestion(tcp_connection_t *tc)
vlib_main_t vlib_node_runtime_t vlib_frame_t * from_frame
vlib_buffer_enqueue_to_next(vm, node, from,(u16 *) nexts, frame->n_vectors)
int tcp_fastrecovery_prr_snd_space(tcp_connection_t *tc)
Estimate send space using proportional rate reduction (RFC6937)
u32 tsecr
Echoed/reflected time stamp.
u16 mss
Maximum segment size advertised.
u8 n_sack_blocks
Number of SACK blocks.
static void * tcp_reuse_buffer(vlib_main_t *vm, vlib_buffer_t *b)
struct _transport_connection transport_connection_t
void transport_connection_tx_pacer_update_bytes(transport_connection_t *tc, u32 bytes)
#define TCP_TO_TIMER_TICK
Factor for converting ticks to timer ticks.
static void * ip6_next_header(ip6_header_t *i)
static u16 ip_calculate_l4_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip_csum_t sum0, u32 payload_length, u8 *iph, u32 ip_header_size, u8 *l4h)
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
enum _tcp_reset_next tcp_reset_next_t
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
static void tcp46_output_trace_frame(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *to_next, u32 n_bufs)
static char * tcp_error_strings[]
u32 transport_connection_tx_pacer_burst(transport_connection_t *tc)
Get tx pacer max burst.
int tcp_retransmit_first_unacked(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
Retransmit first unacked segment.
#define foreach_tcp6_output_next
struct _tcp_main tcp_main_t
static void tcp_make_ack(tcp_connection_t *tc, vlib_buffer_t *b)
Convert buffer to ACK.
u8 flags
Option flags, see above.
static int tcp_make_synack_options(tcp_connection_t *tc, tcp_options_t *opts)
static u8 tcp_max_tx_deq(tcp_connection_t *tc)
static void tcp_make_fin(tcp_connection_t *tc, vlib_buffer_t *b)
Convert buffer to FIN-ACK.
enum _tcp_state tcp_state_t
static void tcp_update_rto(tcp_connection_t *tc)
#define CLIB_PREFETCH(addr, size, type)
void tcp_timer_retransmit_handler(tcp_connection_t *tc)
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
u32 tcp_session_push_header(transport_connection_t *tconn, vlib_buffer_t *b)
vlib_error_t * errors
Vector of errors for this node.
void tcp_update_burst_snd_vars(tcp_connection_t *tc)
Update burst send vars.
#define seq_leq(_s1, _s2)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static void * tcp_init_buffer(vlib_main_t *vm, vlib_buffer_t *b)
void tcp_send_syn(tcp_connection_t *tc)
Send SYN.
void transport_connection_tx_pacer_reset_bucket(transport_connection_t *tc, u32 bucket)
Reset tx pacer bucket.
#define foreach_tcp6_reset_next
void tcp_program_cleanup(tcp_worker_ctx_t *wrk, tcp_connection_t *tc)
#define VLIB_NODE_FN(node)
static void tcp_cc_event(tcp_connection_t *tc, tcp_cc_event_t evt)
#define tcp_opts_wscale(_to)
ip_lookup_next_t lookup_next_index
Next hop after ip4-lookup.
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
#define tcp_validate_txf_size(_tc, _a)
static void tcp_make_ack_i(tcp_connection_t *tc, vlib_buffer_t *b, tcp_state_t state, u8 flags)
Prepare ACK.
void tcp_timer_persist_handler(tcp_connection_t *tc)
Got 0 snd_wnd from peer, try to do something about it.
static void tcp_output_handle_packet(tcp_connection_t *tc0, vlib_buffer_t *b0, vlib_node_runtime_t *error_node, u16 *next0, u8 is_ip4)
#define tcp_in_recovery(tc)
#define VLIB_NODE_FLAG_TRACE
static u8 tcp_timer_is_active(tcp_connection_t *tc, tcp_timers_e timer)
struct clib_bihash_value offset
template key/value backing page structure
void tcp_send_ack(tcp_connection_t *tc)
#define tcp_recovery_on(tc)
struct _sack_scoreboard sack_scoreboard_t
static uword tcp46_output_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int is_ip4)
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
u16 ip4_tcp_udp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip4_header_t *ip0)
format_function_t format_tcp_connection_id
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
vlib_node_registration_t tcp4_output_node
(constructor) VLIB_REGISTER_NODE (tcp4_output_node)
static int tcp_make_syn_options(tcp_connection_t *tc, tcp_options_t *opts)
#define tcp_node_index(node_id, is_ip4)
if(node->flags &VLIB_NODE_FLAG_TRACE) vnet_interface_output_trace(vm
#define scoreboard_rescue_rxt_valid(_sb, _tc)
@ IP_LOOKUP_NEXT_REWRITE
This packet is to be rewritten and forwarded to the next processing node.
#define clib_mem_unaligned(pointer, type)
static u32 transport_max_tx_dequeue(transport_connection_t *tc)
static u16 tcp_compute_checksum(tcp_connection_t *tc, vlib_buffer_t *b)
static void tcp_connection_set_state(tcp_connection_t *tc, tcp_state_t state)
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
static sack_scoreboard_hole_t * scoreboard_last_hole(sack_scoreboard_t *sb)
void tcp_connection_timers_reset(tcp_connection_t *tc)
Stop all connection timers.
vlib_node_registration_t tcp6_reset_node
(constructor) VLIB_REGISTER_NODE (tcp6_reset_node)
void tcp_bt_track_tx(tcp_connection_t *tc, u32 len)
Track a tcp tx burst.
vl_api_ip_port_and_mask_t src_port
sll srl srl sll sra u16x4 i
static void tcp_retransmit_timer_update(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
#define foreach_tcp4_reset_next
static int tcp_make_options(tcp_connection_t *tc, tcp_options_t *opts, tcp_state_t state)
static void * vlib_buffer_push_tcp_net_order(vlib_buffer_t *b, u16 sp, u16 dp, u32 seq, u32 ack, u8 tcp_hdr_opts_len, u8 flags, u16 wnd)
Push TCP header to buffer.
#define tcp_opts_sack_permitted(_to)
static u32 tcp_options_write(u8 *data, tcp_options_t *opts)
Write TCP options to segment.
enum fib_protocol_t_ fib_protocol_t
Protocol Type.
#define TCP_EVT(_evt, _args...)
void tcp_send_reset_w_pkt(tcp_connection_t *tc, vlib_buffer_t *pkt, u32 thread_index, u8 is_ip4)
Send reset without reusing existing buffer.
#define CLIB_CACHE_LINE_BYTES
static u32 tcp_tstamp(tcp_connection_t *tc)
Generate timestamp for tcp connection.
struct _vlib_node_registration vlib_node_registration_t
static void tcp_cc_loss(tcp_connection_t *tc)
u16 current_length
Nbytes between current data and the end of this buffer.
#define TRANSPORT_MAX_HDRS_LEN
#define TCP_MAX_WND_SCALE
#define TCP_RTO_SYN_RETRIES
#define TCP_RXT_MAX_BURST
static f64 tcp_time_now_us(u32 thread_index)
static tcp_header_t * tcp_buffer_hdr(vlib_buffer_t *b)
static void transport_rx_fifo_req_deq_ntf(transport_connection_t *tc)
void tcp_send_window_update_ack(tcp_connection_t *tc)
Send window update ack.
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
description No buffer space
static int tcp_transmit_unsent(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, u32 burst_size)
struct _sack_scoreboard_hole sack_scoreboard_hole_t
static void tcp_update_rcv_wnd(tcp_connection_t *tc)
sack_scoreboard_hole_t * scoreboard_next_rxt_hole(sack_scoreboard_t *sb, sack_scoreboard_hole_t *start, u8 have_unsent, u8 *can_rescue, u8 *snd_limited)
Figure out the next hole to retransmit.
void tcp_bt_track_rxt(tcp_connection_t *tc, u32 start, u32 end)
Track a tcp retransmission.
#define TCP_OPTION_LEN_TIMESTAMP
void tcp_program_ack(tcp_connection_t *tc)
int session_tx_fifo_peek_bytes(transport_connection_t *tc, u8 *buffer, u32 offset, u32 max_bytes)
@ VLIB_NODE_PROTO_HINT_TCP
void tcp_connection_tx_pacer_reset(tcp_connection_t *tc, u32 window, u32 start_bucket)
description fragment has unexpected format
void tcp_program_dupack(tcp_connection_t *tc)
#define TCP_OPTION_LEN_SACK_PERMITTED
#define tcp_in_fastrecovery(tc)
vlib_put_next_frame(vm, node, next_index, 0)
static u8 tcp_window_compute_scale(u32 window)
static ip_csum_t ip_csum_with_carry(ip_csum_t sum, ip_csum_t x)
u32 fib_table_get_index_for_sw_if_index(fib_protocol_t proto, u32 sw_if_index)
Get the index of the FIB bound to the interface.
static void * vlib_buffer_make_headroom(vlib_buffer_t *b, u8 size)
Make head room, typically for packet headers.
static int tcp_make_established_options(tcp_connection_t *tc, tcp_options_t *opts)
static int tcp_send_acks(tcp_connection_t *tc, u32 max_burst_size)
#define foreach_tcp4_output_next
#define tcp_zero_rwnd_sent(tc)
void scoreboard_init_rxt(sack_scoreboard_t *sb, u32 snd_una)
void tcp_send_fin(tcp_connection_t *tc)
Send FIN.
static tcp_connection_t * tcp_connection_get(u32 conn_index, u32 thread_index)
static void tcp_push_hdr_i(tcp_connection_t *tc, vlib_buffer_t *b, u32 snd_nxt, u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
Push TCP header and update connection variables.
vlib_main_t * vm
Convenience pointer to this worker's vlib_main.
void scoreboard_clear_reneging(sack_scoreboard_t *sb, u32 start, u32 end)
#define tcp_zero_rwnd_sent_off(tc)
#define tcp_in_cong_recovery(tc)
static int tcp_retransmit_no_sack(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, u32 burst_size)
Fast retransmit without SACK info.
#define tcp_fastrecovery_first(tc)
static uword pool_elts(void *v)
Number of active elements in a pool.
static u8 tcp_retransmit_should_retry_head(tcp_connection_t *tc, sack_scoreboard_t *sb)
void tcp_send_synack(tcp_connection_t *tc)
u16 ip6_tcp_compute_checksum_custom(vlib_main_t *vm, vlib_buffer_t *p0, ip46_address_t *src, ip46_address_t *dst)
static u32 tcp_window_to_advertise(tcp_connection_t *tc, tcp_state_t state)
Compute and return window to advertise, scaled as per RFC1323.
#define TRANSPORT_PACER_MIN_BURST
static uword round_down_pow2(uword x, uword pow2)
void tcp_program_retransmit(tcp_connection_t *tc)
@ TCP_RESET_NEXT_IP_LOOKUP
static void tcp_update_time_now(tcp_worker_ctx_t *wrk)
static int tcp_retransmit_sack(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, u32 burst_size)
Do retransmit with SACKs.
u32 adj_index_t
An index for adjacencies.
static void tcp_push_ip_hdr(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, vlib_buffer_t *b)
adj_index_t adj_nbr_find(fib_protocol_t nh_proto, vnet_link_t link_type, const ip46_address_t *nh_addr, u32 sw_if_index)
Lookup neighbor adjacency.
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static u32 tcp_prepare_retransmit_segment(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, u32 offset, u32 max_deq_bytes, vlib_buffer_t **b)
Build a retransmit segment.
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
void tcp_bt_check_app_limited(tcp_connection_t *tc)
Check if sample to be generated is app limited.
static u32 tcp_flight_size(const tcp_connection_t *tc)
Our estimate of the number of bytes in flight (pipe size)
static u32 transport_max_rx_enqueue(transport_connection_t *tc)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
static sack_scoreboard_hole_t * scoreboard_first_hole(sack_scoreboard_t *sb)
#define clib_warning(format, args...)
#define tcp_fastrecovery_first_off(tc)
@ TCP_OUTPUT_NEXT_IP_LOOKUP
u16 nexts[VLIB_FRAME_SIZE]
static int tcp_prepare_segment(tcp_worker_ctx_t *wrk, tcp_connection_t *tc, u32 offset, u32 max_deq_bytes, vlib_buffer_t **b)
Allocate a new buffer and build a new tcp segment.
tcp_connection_t tcp_connection
static void tcp_output_push_ip(vlib_main_t *vm, vlib_buffer_t *b0, tcp_connection_t *tc0, u8 is_ip4)
static void vlib_buffer_free_one(vlib_main_t *vm, u32 buffer_index)
Free one buffer. Shorthand to free a single buffer chain.
u16 ip4_tcp_compute_checksum_custom(vlib_main_t *vm, vlib_buffer_t *p0, ip46_address_t *src, ip46_address_t *dst)
#define TCP_ESTABLISH_TIME
static int tcp_make_reset_in_place(vlib_main_t *vm, vlib_buffer_t *b, u8 is_ip4)
#define TCP_OPTION_LEN_MSS
static int tcp_do_retransmit(tcp_connection_t *tc, u32 max_burst_size)
static void tcp_persist_timer_set(tcp_timer_wheel_t *tw, tcp_connection_t *tc)
static void * vlib_buffer_push_tcp(vlib_buffer_t *b, u16 sp_net, u16 dp_net, u32 seq, u32 ack, u8 tcp_hdr_opts_len, u8 flags, u16 wnd)
Push TCP header to buffer.
static void tcp_check_if_gso(tcp_connection_t *tc, vlib_buffer_t *b)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
vl_api_interface_index_t sw_if_index
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
#define tcp_worker_stats_inc(_wrk, _stat, _val)
#define TCP_OPTION_LEN_SACK_BLOCK
static ip_adjacency_t * adj_get(adj_index_t adj_index)
Get a pointer to an adjacency object from its index.
u32 tcp_initial_window_to_advertise(tcp_connection_t *tc)
Compute initial window and scale factor.
static void tcp_output_handle_link_local(tcp_connection_t *tc0, vlib_buffer_t *b0, u16 *next0, u32 *error0)
static void tcp_enqueue_to_ip_lookup(tcp_worker_ctx_t *wrk, vlib_buffer_t *b, u32 bi, u8 is_ip4, u32 fib_index)
@ TCP_OUTPUT_NEXT_IP_REWRITE
static void * vlib_buffer_push_ip6_custom(vlib_main_t *vm, vlib_buffer_t *b, ip6_address_t *src, ip6_address_t *dst, int proto, u32 flow_label)
Push IPv6 header to buffer.
static u8 tcp_is_descheduled(tcp_connection_t *tc)
static void * ip4_next_header(ip4_header_t *i)
static u32 tcp_initial_wnd_unscaled(tcp_connection_t *tc)
TCP's initial window.
void session_add_self_custom_tx_evt(transport_connection_t *tc, u8 has_prio)
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
int session_stream_connect_notify(transport_connection_t *tc, session_error_t err)
VLIB buffer representation.
#define VLIB_REGISTER_NODE(x,...)
int tcp_half_open_connection_cleanup(tcp_connection_t *tc)
Try to cleanup half-open connection.
static u32 tcp_available_cc_snd_space(const tcp_connection_t *tc)
Estimate of how many bytes we can still push into the network.
vl_api_wireguard_peer_flags_t flags