static u8 *
format_gtpu_rx_trace (u8 * s, va_list * args)
{
  /* ... */
  if (t->tunnel_index != ~0)
    s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
                t->tunnel_index, t->teid, t->next_index, t->error);
  else
    s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
                t->teid);
  return s;
}
static uword
gtpu_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * from_frame,
            u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  u32 last_tunnel_index = ~0;
  gtpu4_tunnel_key_t last_key4;
  gtpu6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    clib_memset (&last_key6, 0xff, sizeof (last_key6));
  /* ... */
  stats_n_packets = stats_n_bytes = 0;
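  /* Interface RX counters are batched: bytes and packets accumulate per
   * sw_if_index and are flushed to combined_sw_if_counters whenever the
   * interface changes or the frame ends. */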
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
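      /* Dual loop: two packets per iteration; n_left_from >= 4 keeps two
       * more buffers in flight for prefetch. */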
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          u32 tunnel_index0, tunnel_index1;
          gtpu4_tunnel_key_t key4_0, key4_1;
          gtpu6_tunnel_key_t key6_0, key6_1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0, has_space1;
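          /* GTP-U header length: when none of the E/S/PN flag bits is set,
           * the 4-byte optional-field block (sequence number, N-PDU number,
           * next-extension type) is absent, so the mandatory header is
           * sizeof (gtpu_header_t) - 4 bytes. */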
          /* ... (gtpu_hdr_len0 is computed the same way for packet 0) */
          gtpu_hdr_len1 = sizeof (gtpu_header_t) -
            (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
          /* Drop on bad GTP-U version or truncated header. */
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
          /* ... */
          if (is_ip4)
            {
              key4_0.src = ip4_0->src_address.as_u32;
              key4_0.teid = gtpu0->teid;
              /* ... hash lookup by (source address, TEID); on a miss: */
                      error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next0 = GTPU_INPUT_NEXT_DROP;
              /* ... on a hit, cache key and tunnel for the next packet: */
                  last_key4.as_u64 = key4_0.as_u64;
                  tunnel_index0 = last_tunnel_index = p0[0];
              /* ... key unchanged since the previous packet: */
                tunnel_index0 = last_tunnel_index;
              /* ... encap-fib validation failed: */
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
              /* ... multicast fallback: re-key on the destination address: */
              key4_0.src = ip4_0->dst_address.as_u32;
              key4_0.teid = gtpu0->teid;
              /* ... */
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
            }
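          /* The IPv4 key packs the 4-byte source address and 4-byte TEID
           * into one u64 (roughly, in gtpu.h:
           *   typedef union {
           *     struct { ip4_address_t src; u32 teid; };
           *     u64 as_u64;
           *   } gtpu4_tunnel_key_t;
           * ), so "same tunnel as last packet" is a single 64-bit compare.
           * The IPv6 key is too wide for that, hence the memcmp below. */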
          else
            {
              key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
              key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
              key6_0.teid = gtpu0->teid;

              if (PREDICT_FALSE (memcmp (&key6_0, &last_key6,
                                         sizeof (last_key6)) != 0))
                {
                  /* ... hash miss: */
                      error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next0 = GTPU_INPUT_NEXT_DROP;
                  /* ... hit: */
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
              /* ... encap-fib validation failed: */
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
              /* ... multicast fallback: re-key on the destination address: */
              key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
              key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
              key6_0.teid = gtpu0->teid;
              /* ... */
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
            }
          /* ... use the mcast tunnel's sw_if_index when set: */
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Interface changed: back out this packet, flush the batch for
             the previous interface, and restart the batch. */
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              /* ... */
              vlib_increment_combined_counter (im->combined_sw_if_counters +
                                               VNET_INTERFACE_COUNTER_RX,
                                               thread_index, stats_sw_if_index,
                                               stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              /* ... */
              tr->teid = has_space0 ? clib_net_to_host_u32 (gtpu0->teid) : ~0;
            }
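          /* Packet 1 follows exactly the same path as packet 0. */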
          /* Drop on bad GTP-U version or truncated header. */
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
          /* ... */
          if (is_ip4)
            {
              key4_1.src = ip4_1->src_address.as_u32;
              key4_1.teid = gtpu1->teid;
              /* ... hash lookup; on a miss: */
                      error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next1 = GTPU_INPUT_NEXT_DROP;
              /* ... on a hit: */
                  last_key4.as_u64 = key4_1.as_u64;
                  tunnel_index1 = last_tunnel_index = p1[0];
              /* ... key unchanged: */
                tunnel_index1 = last_tunnel_index;
              /* ... encap-fib validation failed: */
                  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next1 = GTPU_INPUT_NEXT_DROP;
              /* ... multicast fallback: */
              key4_1.src = ip4_1->dst_address.as_u32;
              key4_1.teid = gtpu1->teid;
              /* ... */
                  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next1 = GTPU_INPUT_NEXT_DROP;
            }
          else
            {
              key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
              key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
              key6_1.teid = gtpu1->teid;

              if (PREDICT_FALSE (memcmp (&key6_1, &last_key6,
                                         sizeof (last_key6)) != 0))
                {
                  /* ... hash miss: */
                      error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next1 = GTPU_INPUT_NEXT_DROP;
                  /* ... hit: */
                  tunnel_index1 = last_tunnel_index = p1[0];
                }
              else
                tunnel_index1 = last_tunnel_index;
              /* ... encap-fib validation failed: */
                  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next1 = GTPU_INPUT_NEXT_DROP;
              /* ... multicast fallback: */
              key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
              key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
              key6_1.teid = gtpu1->teid;
              /* ... */
                  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next1 = GTPU_INPUT_NEXT_DROP;
            }
          /* ... use the mcast tunnel's sw_if_index when set: */
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Interface changed: flush the batched counters. */
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              /* ... */
              vlib_increment_combined_counter (im->combined_sw_if_counters +
                                               VNET_INTERFACE_COUNTER_RX,
                                               thread_index, stats_sw_if_index,
                                               stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof (*tr));
              /* ... */
              tr->teid = has_space1 ? clib_net_to_host_u32 (gtpu1->teid) : ~0;
            }
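          /* vlib_validate_buffer_enqueue_x2 moves both buffers to the
           * frame for the cached next index, then fixes up the enqueue if
           * next0/next1 differ (e.g. one packet was marked for drop). */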
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
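      /* Single-buffer loop for the remainder of the frame; same logic as
       * the dual loop above. */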
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          /* ... */
          gtpu4_tunnel_key_t key4_0;
          gtpu6_tunnel_key_t key6_0;
          u32 sw_if_index0, len0;
          /* Drop on bad GTP-U version or truncated header. */
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
          /* ... */
          if (is_ip4)
            {
              key4_0.src = ip4_0->src_address.as_u32;
              key4_0.teid = gtpu0->teid;
              /* ... hash lookup; on a miss: */
                      error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next0 = GTPU_INPUT_NEXT_DROP;
              /* ... on a hit: */
                  last_key4.as_u64 = key4_0.as_u64;
                  tunnel_index0 = last_tunnel_index = p0[0];
              /* ... key unchanged: */
                tunnel_index0 = last_tunnel_index;
              /* ... encap-fib validation failed: */
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
              /* ... multicast fallback: */
              key4_0.src = ip4_0->dst_address.as_u32;
              key4_0.teid = gtpu0->teid;
              /* ... */
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
            }
          else
            {
              key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
              key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
              key6_0.teid = gtpu0->teid;

              if (PREDICT_FALSE (memcmp (&key6_0, &last_key6,
                                         sizeof (last_key6)) != 0))
                {
                  /* ... hash miss: */
                      error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                      next0 = GTPU_INPUT_NEXT_DROP;
                  /* ... hit: */
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
              /* ... encap-fib validation failed: */
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
              /* ... multicast fallback: */
              key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
              key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
              key6_0.teid = gtpu0->teid;
              /* ... */
                  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                  next0 = GTPU_INPUT_NEXT_DROP;
            }
          /* ... use the mcast tunnel's sw_if_index when set: */
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Interface changed: flush the batched counters. */
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              /* ... */
              vlib_increment_combined_counter (im->combined_sw_if_counters +
                                               VNET_INTERFACE_COUNTER_RX,
                                               thread_index, stats_sw_if_index,
                                               stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              /* ... */
              tr->teid = has_space0 ? clib_net_to_host_u32 (gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, is_ip4 ?
                               gtpu4_input_node.index : gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED, pkts_decapsulated);

  /* Flush any counters still batched when the frame ends. */
  if (stats_n_packets)
    vlib_increment_combined_counter (im->combined_sw_if_counters +
                                     VNET_INTERFACE_COUNTER_RX,
                                     thread_index, stats_sw_if_index,
                                     stats_n_packets, stats_n_bytes);
  return from_frame->n_vectors;
}
static char * gtpu_error_strings[] = {
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
};

VLIB_REGISTER_NODE (gtpu4_input_node) = {
  .name = "gtpu4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },
};

VLIB_REGISTER_NODE (gtpu6_input_node) = {
  .name = "gtpu6-input",
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },
};

static uword
ip_gtpu_bypass_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame,
                       u32 is_ip4)
{
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
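  /* The bypass nodes sit on the ip4-unicast / ip6-unicast feature arcs:
   * UDP packets addressed to a local VTEP on the GTP-U port are length-
   * and checksum-verified here, then handed straight to gtpu4-input /
   * gtpu6-input, skipping ip-local; everything else continues along the
   * feature arc via vnet_feature_next. */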
  vtep4_key_t last_vtep4;       /* last IPv4 address / fib index
                                   matching a local VTEP address */
  vtep6_key_t last_vtep6;       /* last IPv6 address / fib index
                                   matching a local VTEP address */
  /* ... */
#ifdef CLIB_HAVE_VEC512
  vtep4_cache_t vtep4_u512;
#endif

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* ... prefetch the next pair ... */
          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0;         /* not UDP packet */
          /* ... */
          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit0;         /* not GTPU packet */

          /* Validate DIP against VTEPs */
#ifdef CLIB_HAVE_VEC512
          if (!vtep4_check_vector
              (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
          if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
            goto exit0;         /* no local VTEP for GTPU packet */
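          /* (UDP_DST_PORT_GTPU above is the registered GTP-U user-plane
           * port, 2152.) */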
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
          /* ... */
          ip_len0 = clib_net_to_host_u16 (ip40->length);
          /* ... */
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Checksum not yet verified by hardware? Validate in software. */
          if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
            {
              /* ... ip4_tcp_udp_validate_checksum /
                 ip6_tcp_udp_icmp_validate_checksum ... */
              good_udp0 =
                (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }
          b0->error = error0 ? error_node->errors[error0] : 0;
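          /* Packet 1: identical validation. */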
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1;         /* not UDP packet */
          /* ... */
          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit1;         /* not GTPU packet */

          /* Validate DIP against VTEPs */
#ifdef CLIB_HAVE_VEC512
          if (!vtep4_check_vector
              (&gtm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
#else
          if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
#endif
            goto exit1;         /* no local VTEP for GTPU packet */

          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
          /* ... */
          ip_len1 = clib_net_to_host_u16 (ip41->length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
            {
              /* ... validate in software ... */
              good_udp1 =
                (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }
          b1->error = error1 ? error_node->errors[error1] : 0;
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          /* ... */
          n_left_to_next -= 1;
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit;          /* not UDP packet */
          /* ... */
          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit;          /* not GTPU packet */

          /* Validate DIP against VTEPs */
#ifdef CLIB_HAVE_VEC512
          if (!vtep4_check_vector
              (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
#else
          if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
            goto exit;          /* no local VTEP for GTPU packet */

          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
          /* ... */
          ip_len0 = clib_net_to_host_u16 (ip40->length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
            {
              /* ... validate in software ... */
              good_udp0 =
                (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }
          b0->error = error0 ? error_node->errors[error0] : 0;
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }
      /* ... */
    }
  /* ... */
}

VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
  .name = "ip4-gtpu-bypass",
  .vector_size = sizeof (u32),
  /* ... */
};

#ifndef CLIB_MARCH_VARIANT
/* ... ip4_gtpu_bypass_init ... */
#endif

VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
  .name = "ip6-gtpu-bypass",
  .vector_size = sizeof (u32),
  /* ... */
};
#ifndef CLIB_MARCH_VARIANT
/* ... ip6_gtpu_bypass_init ... */
#endif

#define foreach_gtpu_flow_error                                 \
  _(NONE, "no error")                                           \
  _(PAYLOAD_ERROR, "Payload type errors")                       \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")                 \
  _(IP_HEADER_ERROR, "Rx ip header errors")                     \
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors")               \
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

/* Flow error enum: the flow errors above, then the common GTPU errors. */
#define _(f,s) GTPU_FLOW_ERROR_##f,
#define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
/* ... */

/* Matching error strings for the node registration. */
#define gtpu_error(n,s) s,
/* ... */

#define gtpu_local_need_csum_check(_b)                          \
  (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED             \
     || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))

#define gtpu_local_csum_is_valid(_b)                            \
  ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT               \
    || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) != 0)

static_always_inline u8
gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t * b)
{
  u32 flags = b->flags;
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      /* ... recompute in software ... */
    }
  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}
static_always_inline u8
gtpu_check_ip (vlib_buffer_t * b, u16 payload_len)
{
  /* ... */
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  /* ... compare against the expected payload length ... */
}

static_always_inline u8
gtpu_check_ip_udp_len (vlib_buffer_t * b)
{
  /* ... */
  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
  u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
  return udp_len > ip_len;
}
static_always_inline u8
gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = GTPU_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}
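/* The assignments above overwrite in sequence, so when several errors are
 * set the UDP checksum error wins, then UDP length, then IP header. */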
static uword
gtpu_flow_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  /* ... */
  u32 pkts_decapsulated = 0;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;

  /* ... */
  stats_n_packets = stats_n_bytes = 0;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 gtpu_hdr_len0, gtpu_hdr_len1;
          u32 tunnel_index0, tunnel_index1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u8 has_space0 = 0, has_space1 = 0;
          /* ... */
          n_left_to_next -= 2;
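          /* Flow-offload path: the NIC has already matched the packet to
           * a tunnel, so the tunnel index is derived from the buffer's
           * flow_id rather than a hash lookup; gtpu_check_ip,
           * gtpu_check_ip_udp_len and gtpu_validate_udp_csum supply
           * ip_err, udp_err and csum_err. */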
          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              /* ... error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0); */
            }
          /* ... bad version or truncated header: */
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
          /* ... unexpected inner payload type: */
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
          /* ... */
          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Interface changed: flush the batched counters. */
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (im->combined_sw_if_counters +
                                                 VNET_INTERFACE_COUNTER_RX,
                                                 thread_index, stats_sw_if_index,
                                                 stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              /* ... */
              tr->teid = has_space0 ? clib_net_to_host_u32 (gtpu0->teid) : ~0;
            }
          if (ip_err1 || udp_err1 || csum_err1)
            {
              next1 = GTPU_INPUT_NEXT_DROP;
              /* ... error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1); */
            }
          /* ... */
          gtpu_hdr_len1 = sizeof (gtpu_header_t) -
            (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
          /* ... bad version or truncated header: */
              error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next1 = GTPU_INPUT_NEXT_DROP;
          /* ... unexpected inner payload type: */
              error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next1 = GTPU_INPUT_NEXT_DROP;
          /* ... */
          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Interface changed: flush the batched counters. */
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter (im->combined_sw_if_counters +
                                                 VNET_INTERFACE_COUNTER_RX,
                                                 thread_index, stats_sw_if_index,
                                                 stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof (*tr));
              /* ... */
              tr->teid = has_space1 ? clib_net_to_host_u32 (gtpu1->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          /* ... */
          u32 sw_if_index0, len0;
          /* ... */
          n_left_to_next -= 1;
          if (ip_err0 || udp_err0 || csum_err0)
            {
              next0 = GTPU_INPUT_NEXT_DROP;
              /* ... error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0); */
            }
          /* ... bad version or truncated header: */
              error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
              next0 = GTPU_INPUT_NEXT_DROP;
          /* ... unexpected inner payload type: */
              error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
              next0 = GTPU_INPUT_NEXT_DROP;
          /* ... */
          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Interface changed: flush the batched counters. */
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (im->combined_sw_if_counters +
                                                 VNET_INTERFACE_COUNTER_RX,
                                                 thread_index, stats_sw_if_index,
                                                 stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              /* ... */
              tr->teid = has_space0 ? clib_net_to_host_u32 (gtpu0->teid) : ~0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }
      /* ... */
    }

  vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
                               GTPU_ERROR_DECAPSULATED, pkts_decapsulated);

  /* Flush any counters still batched when the frame ends. */
  if (stats_n_packets)
    vlib_increment_combined_counter (im->combined_sw_if_counters +
                                     VNET_INTERFACE_COUNTER_RX,
                                     thread_index, stats_sw_if_index,
                                     stats_n_packets, stats_n_bytes);
  return from_frame->n_vectors;
}
#ifndef CLIB_MULTIARCH_VARIANT
VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
  .name = "gtpu4-flow-input",
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },
};
#endif