FD.io VPP v21.10.1-2-g0a485f517
Vector Packet Processing
gtpu_decap.c
1 /*
2  * decap.c: gtpu tunnel decap packet processing
3  *
4  * Copyright (c) 2017 Intel and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #include <vlib/vlib.h>
19 #include <gtpu/gtpu.h>
20 
21 vlib_node_registration_t gtpu4_input_node;
22 vlib_node_registration_t gtpu6_input_node;
23 
24 typedef struct {
25  u32 next_index;
26  u32 tunnel_index;
27  u32 error;
28  u32 teid;
29 } gtpu_rx_trace_t;
30 
31 static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
32 {
33  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
34  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
35  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);
36 
37  if (t->tunnel_index != ~0)
38  {
39  s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
40  t->tunnel_index, t->teid, t->next_index, t->error);
41  }
42  else
43  {
44  s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
45  t->teid);
46  }
47  return s;
48 }
49 
50 always_inline u32
51 validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
52 {
53  return t->encap_fib_index == vlib_buffer_get_ip_fib_index (b, is_ip4);
54 }
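/* Note: tunnel validation in the nodes below proceeds in three steps:
 * (1) look up the tunnel by outer source IP + TEID, (2) check the tunnel's
 * encap FIB index against the FIB the packet arrived on (validate_gtpu_fib
 * above), and (3) match the packet's destination IP against the tunnel's
 * source, falling back to a multicast-tunnel lookup by destination IP + TEID. */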
55 
56 always_inline uword
57 gtpu_input (vlib_main_t * vm,
58  vlib_node_runtime_t * node,
59  vlib_frame_t * from_frame,
60  u32 is_ip4)
61 {
62  u32 n_left_from, next_index, * from, * to_next;
63  gtpu_main_t * gtm = &gtpu_main;
64  vnet_main_t * vnm = gtm->vnet_main;
65  vnet_interface_main_t * im = &vnm->interface_main;
66  u32 last_tunnel_index = ~0;
67  gtpu4_tunnel_key_t last_key4;
68  gtpu6_tunnel_key_t last_key6;
69  u32 pkts_decapsulated = 0;
70  u32 thread_index = vlib_get_thread_index();
71  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
72 
73  if (is_ip4)
74  last_key4.as_u64 = ~0;
75  else
76  clib_memset (&last_key6, 0xff, sizeof (last_key6));
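 /* Note: last_key4/last_key6 together with last_tunnel_index form a
  * one-entry lookup cache: the hash lookup is skipped while consecutive
  * packets carry the same (SIP, TEID) key. Initializing the keys to
  * all-ones guarantees the first packet always misses the cache. */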
77 
78  from = vlib_frame_vector_args (from_frame);
79  n_left_from = from_frame->n_vectors;
80 
81  next_index = node->cached_next_index;
82  stats_sw_if_index = node->runtime_data[0];
83  stats_n_packets = stats_n_bytes = 0;
84 
85  while (n_left_from > 0)
86  {
87  u32 n_left_to_next;
88 
89  vlib_get_next_frame (vm, node, next_index,
90  to_next, n_left_to_next);
91  while (n_left_from >= 4 && n_left_to_next >= 2)
92  {
93  u32 bi0, bi1;
94  vlib_buffer_t * b0, * b1;
95  u32 next0, next1;
96  ip4_header_t * ip4_0, * ip4_1;
97  ip6_header_t * ip6_0, * ip6_1;
98  gtpu_header_t * gtpu0, * gtpu1;
99  u32 gtpu_hdr_len0, gtpu_hdr_len1;
100  uword * p0, * p1;
101  u32 tunnel_index0, tunnel_index1;
102  gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
103  gtpu4_tunnel_key_t key4_0, key4_1;
104  gtpu6_tunnel_key_t key6_0, key6_1;
105  u32 error0, error1;
106  u32 sw_if_index0, sw_if_index1, len0, len1;
107  u8 has_space0, has_space1;
108  u8 ver0, ver1;
109 
110  /* Prefetch next iteration. */
111  {
112  vlib_buffer_t * p2, * p3;
113 
114  p2 = vlib_get_buffer (vm, from[2]);
115  p3 = vlib_get_buffer (vm, from[3]);
116 
117  vlib_prefetch_buffer_header (p2, LOAD);
118  vlib_prefetch_buffer_header (p3, LOAD);
119 
120  CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
121  CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
122  }
123 
124  bi0 = from[0];
125  bi1 = from[1];
126  to_next[0] = bi0;
127  to_next[1] = bi1;
128  from += 2;
129  to_next += 2;
130  n_left_to_next -= 2;
131  n_left_from -= 2;
132 
133  b0 = vlib_get_buffer (vm, bi0);
134  b1 = vlib_get_buffer (vm, bi1);
135 
136  /* udp leaves current_data pointing at the gtpu header */
137  gtpu0 = vlib_buffer_get_current (b0);
138  gtpu1 = vlib_buffer_get_current (b1);
139  if (is_ip4)
140  {
141  ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
142  ip4_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip4_header_t));
143  }
144  else
145  {
146  ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
147  ip6_1 = (void *)((u8*)gtpu1 - sizeof(udp_header_t) - sizeof(ip6_header_t));
148  }
149 
150  tunnel_index0 = ~0;
151  error0 = 0;
152 
153  tunnel_index1 = ~0;
154  error1 = 0;
155 
156  /* speculatively load gtp header version field */
157  ver0 = gtpu0->ver_flags;
158  ver1 = gtpu1->ver_flags;
159 
160  /*
161  * Manipulate gtpu header
162  * TBD: Manipulate Sequence Number and N-PDU Number
163  * TBD: Manipulate Next Extension Header
164  */
165  gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
166  gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
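 /* Note on the arithmetic above: the GTP-U v1 mandatory header is 8 bytes,
  * while gtpu_header_t also includes the 4 optional bytes (sequence number,
  * N-PDU number, next-extension-header type) that are present whenever any
  * of the E, S or PN flags is set. With all three flags clear, those 4
  * bytes are subtracted from sizeof(gtpu_header_t). */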
167 
168  has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
169  has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
170 
171  if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
172  {
173  error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
174  next0 = GTPU_INPUT_NEXT_DROP;
175  goto trace0;
176  }
177 
178  /* Manipulate packet 0 */
179  if (is_ip4) {
180  key4_0.src = ip4_0->src_address.as_u32;
181  key4_0.teid = gtpu0->teid;
182 
183  /* Make sure the GTPU tunnel exists according to the packet SIP and teid;
184  * the SIP identifies a GTPU path, and the teid identifies a tunnel within that path */
185  if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
186  {
187  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
188  if (PREDICT_FALSE (p0 == NULL))
189  {
190  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
191  next0 = GTPU_INPUT_NEXT_DROP;
192  goto trace0;
193  }
194  last_key4.as_u64 = key4_0.as_u64;
195  tunnel_index0 = last_tunnel_index = p0[0];
196  }
197  else
198  tunnel_index0 = last_tunnel_index;
199  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
200 
201  /* Validate GTPU tunnel encap-fib index against packet */
202  if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
203  {
204  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
205  next0 = GTPU_INPUT_NEXT_DROP;
206  goto trace0;
207  }
208 
209  /* Validate GTPU tunnel SIP against packet DIP */
210  if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
211  goto next0; /* valid packet */
212  if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
213  {
214  key4_0.src = ip4_0->dst_address.as_u32;
215  key4_0.teid = gtpu0->teid;
216  /* Make sure a mcast GTPU tunnel exists for the packet DIP and teid */
217  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
218  if (PREDICT_TRUE (p0 != NULL))
219  {
220  mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
221  goto next0; /* valid packet */
222  }
223  }
224  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
225  next0 = GTPU_INPUT_NEXT_DROP;
226  goto trace0;
227 
228  } else /* !is_ip4 */ {
229  key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
230  key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
231  key6_0.teid = gtpu0->teid;
232 
233  /* Make sure the GTPU tunnel exists according to the packet SIP and teid;
234  * the SIP identifies a GTPU path, and the teid identifies a tunnel within that path */
235  if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
236  {
237  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
238  if (PREDICT_FALSE (p0 == NULL))
239  {
240  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
241  next0 = GTPU_INPUT_NEXT_DROP;
242  goto trace0;
243  }
244  clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
245  tunnel_index0 = last_tunnel_index = p0[0];
246  }
247  else
248  tunnel_index0 = last_tunnel_index;
249  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
250 
251  /* Validate GTPU tunnel encap-fib index against packet */
252  if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
253  {
254  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
255  next0 = GTPU_INPUT_NEXT_DROP;
256  goto trace0;
257  }
258 
259  /* Validate GTPU tunnel SIP against packet DIP */
260  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
261  &t0->src.ip6)))
262  goto next0; /* valid packet */
263  if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
264  {
265  key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
266  key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
267  key6_0.teid = gtpu0->teid;
268  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
269  if (PREDICT_TRUE (p0 != NULL))
270  {
271  mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
272  goto next0; /* valid packet */
273  }
274  }
275  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
276  next0 = GTPU_INPUT_NEXT_DROP;
277  goto trace0;
278  }
279 
280  next0:
281  /* Pop gtpu header */
282  vlib_buffer_advance (b0, gtpu_hdr_len0);
283 
284  next0 = t0->decap_next_index;
285  sw_if_index0 = t0->sw_if_index;
286  len0 = vlib_buffer_length_in_chain (vm, b0);
287 
288  /* Required to make the l2 tag push / pop code work on l2 subifs */
289  if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
290  vnet_update_l2_len (b0);
291 
292  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
293  vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
294  sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
295 
296  pkts_decapsulated ++;
297  stats_n_packets += 1;
298  stats_n_bytes += len0;
299 
300  /* Batch stats increment on the same gtpu tunnel so counter
301  is not incremented per packet */
302  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
303  {
304  stats_n_packets -= 1;
305  stats_n_bytes -= len0;
306  if (stats_n_packets)
307  vlib_increment_combined_counter
308  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
309  thread_index, stats_sw_if_index,
310  stats_n_packets, stats_n_bytes);
311  stats_n_packets = 1;
312  stats_n_bytes = len0;
313  stats_sw_if_index = sw_if_index0;
314  }
315 
316  trace0:
317  b0->error = error0 ? node->errors[error0] : 0;
318 
319  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
320  {
321  gtpu_rx_trace_t *tr
322  = vlib_add_trace (vm, node, b0, sizeof (*tr));
323  tr->next_index = next0;
324  tr->error = error0;
325  tr->tunnel_index = tunnel_index0;
326  tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
327  }
328 
329  if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
330  {
331  error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
332  next1 = GTPU_INPUT_NEXT_DROP;
333  goto trace1;
334  }
335 
336  /* Manipulate packet 1 */
337  if (is_ip4) {
338  key4_1.src = ip4_1->src_address.as_u32;
339  key4_1.teid = gtpu1->teid;
340 
341  /* Make sure the GTPU tunnel exists according to the packet SIP and teid;
342  * the SIP identifies a GTPU path, and the teid identifies a tunnel within that path */
343  if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
344  {
345  p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
346  if (PREDICT_FALSE (p1 == NULL))
347  {
348  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
349  next1 = GTPU_INPUT_NEXT_DROP;
350  goto trace1;
351  }
352  last_key4.as_u64 = key4_1.as_u64;
353  tunnel_index1 = last_tunnel_index = p1[0];
354  }
355  else
356  tunnel_index1 = last_tunnel_index;
357  t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
358 
359  /* Validate GTPU tunnel encap-fib index against packet */
360  if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
361  {
362  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
363  next1 = GTPU_INPUT_NEXT_DROP;
364  goto trace1;
365  }
366 
367  /* Validate GTPU tunnel SIP against packet DIP */
368  if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
369  goto next1; /* valid packet */
370  if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
371  {
372  key4_1.src = ip4_1->dst_address.as_u32;
373  key4_1.teid = gtpu1->teid;
374  /* Make sure a mcast GTPU tunnel exists for the packet DIP and teid */
375  p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
376  if (PREDICT_TRUE (p1 != NULL))
377  {
378  mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
379  goto next1; /* valid packet */
380  }
381  }
382  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
383  next1 = GTPU_INPUT_NEXT_DROP;
384  goto trace1;
385 
386  } else /* !is_ip4 */ {
387  key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
388  key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
389  key6_1.teid = gtpu1->teid;
390 
391  /* Make sure the GTPU tunnel exists according to the packet SIP and teid;
392  * the SIP identifies a GTPU path, and the teid identifies a tunnel within that path */
393  if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
394  {
395  p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
396 
397  if (PREDICT_FALSE (p1 == NULL))
398  {
399  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
400  next1 = GTPU_INPUT_NEXT_DROP;
401  goto trace1;
402  }
403 
404  clib_memcpy_fast (&last_key6, &key6_1, sizeof(key6_1));
405  tunnel_index1 = last_tunnel_index = p1[0];
406  }
407  else
408  tunnel_index1 = last_tunnel_index;
409  t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
410 
411  /* Validate GTPU tunnel encap-fib index against packet */
412  if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
413  {
414  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
415  next1 = GTPU_INPUT_NEXT_DROP;
416  goto trace1;
417  }
418 
419  /* Validate GTPU tunnel SIP against packet DIP */
420  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
421  &t1->src.ip6)))
422  goto next1; /* valid packet */
423  if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
424  {
425  key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
426  key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
427  key6_1.teid = gtpu1->teid;
428  p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
429  if (PREDICT_TRUE (p1 != NULL))
430  {
431  mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
432  goto next1; /* valid packet */
433  }
434  }
435  error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
436  next1 = GTPU_INPUT_NEXT_DROP;
437  goto trace1;
438  }
439 
440  next1:
441  /* Pop gtpu header */
442  vlib_buffer_advance (b1, gtpu_hdr_len1);
443 
444  next1 = t1->decap_next_index;
445  sw_if_index1 = t1->sw_if_index;
446  len1 = vlib_buffer_length_in_chain (vm, b1);
447 
448  /* Required to make the l2 tag push / pop code work on l2 subifs */
449  if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
450  vnet_update_l2_len (b1);
451 
452  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
453  vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
454  sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;
455 
456  pkts_decapsulated ++;
457  stats_n_packets += 1;
458  stats_n_bytes += len1;
459 
460  /* Batch stats increment on the same gtpu tunnel so counter
461  is not incremented per packet */
462  if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
463  {
464  stats_n_packets -= 1;
465  stats_n_bytes -= len1;
466  if (stats_n_packets)
467  vlib_increment_combined_counter
468  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
469  thread_index, stats_sw_if_index,
470  stats_n_packets, stats_n_bytes);
471  stats_n_packets = 1;
472  stats_n_bytes = len1;
473  stats_sw_if_index = sw_if_index1;
474  }
475 
476  trace1:
477  b1->error = error1 ? node->errors[error1] : 0;
478 
479  if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
480  {
481  gtpu_rx_trace_t *tr
482  = vlib_add_trace (vm, node, b1, sizeof (*tr));
483  tr->next_index = next1;
484  tr->error = error1;
485  tr->tunnel_index = tunnel_index1;
486  tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
487  }
488 
489  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
490  to_next, n_left_to_next,
491  bi0, bi1, next0, next1);
492  }
493 
494  while (n_left_from > 0 && n_left_to_next > 0)
495  {
496  u32 bi0;
497  vlib_buffer_t * b0;
498  u32 next0;
499  ip4_header_t * ip4_0;
500  ip6_header_t * ip6_0;
501  gtpu_header_t * gtpu0;
502  u32 gtpu_hdr_len0;
503  uword * p0;
504  u32 tunnel_index0;
505  gtpu_tunnel_t * t0, * mt0 = NULL;
506  gtpu4_tunnel_key_t key4_0;
507  gtpu6_tunnel_key_t key6_0;
508  u32 error0;
509  u32 sw_if_index0, len0;
510  u8 has_space0;
511  u8 ver0;
512 
513  bi0 = from[0];
514  to_next[0] = bi0;
515  from += 1;
516  to_next += 1;
517  n_left_from -= 1;
518  n_left_to_next -= 1;
519 
520  b0 = vlib_get_buffer (vm, bi0);
521 
522  /* udp leaves current_data pointing at the gtpu header */
523  gtpu0 = vlib_buffer_get_current (b0);
524  if (is_ip4) {
525  ip4_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip4_header_t));
526  } else {
527  ip6_0 = (void *)((u8*)gtpu0 - sizeof(udp_header_t) - sizeof(ip6_header_t));
528  }
529 
530  tunnel_index0 = ~0;
531  error0 = 0;
532 
533  /* speculatively load gtp header version field */
534  ver0 = gtpu0->ver_flags;
535 
536  /*
537  * Manipulate gtpu header
538  * TBD: Manipulate Sequence Number and N-PDU Number
539  * TBD: Manipulate Next Extension Header
540  */
541  gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
542 
543  has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
544 
545  if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
546  {
547  error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
548  next0 = GTPU_INPUT_NEXT_DROP;
549  goto trace00;
550  }
551 
552  if (is_ip4) {
553  key4_0.src = ip4_0->src_address.as_u32;
554  key4_0.teid = gtpu0->teid;
555 
556  /* Make sure the GTPU tunnel exists according to the packet SIP and teid;
557  * the SIP identifies a GTPU path, and the teid identifies a tunnel within that path */
558  if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
559  {
560  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
561  if (PREDICT_FALSE (p0 == NULL))
562  {
563  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
564  next0 = GTPU_INPUT_NEXT_DROP;
565  goto trace00;
566  }
567  last_key4.as_u64 = key4_0.as_u64;
568  tunnel_index0 = last_tunnel_index = p0[0];
569  }
570  else
571  tunnel_index0 = last_tunnel_index;
572  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
573 
574  /* Validate GTPU tunnel encap-fib index against packet */
575  if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
576  {
577  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
578  next0 = GTPU_INPUT_NEXT_DROP;
579  goto trace00;
580  }
581 
582  /* Validate GTPU tunnel SIP against packet DIP */
583  if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
584  goto next00; /* valid packet */
585  if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
586  {
587  key4_0.src = ip4_0->dst_address.as_u32;
588  key4_0.teid = gtpu0->teid;
589  /* Make sure a mcast GTPU tunnel exists for the packet DIP and teid */
590  p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
591  if (PREDICT_TRUE (p0 != NULL))
592  {
593  mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
594  goto next00; /* valid packet */
595  }
596  }
597  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
598  next0 = GTPU_INPUT_NEXT_DROP;
599  goto trace00;
600 
601  } else /* !is_ip4 */ {
602  key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
603  key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
604  key6_0.teid = gtpu0->teid;
605 
606  /* Make sure the GTPU tunnel exists according to the packet SIP and teid;
607  * the SIP identifies a GTPU path, and the teid identifies a tunnel within that path */
608  if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
609  {
610  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
611  if (PREDICT_FALSE (p0 == NULL))
612  {
613  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
614  next0 = GTPU_INPUT_NEXT_DROP;
615  goto trace00;
616  }
617  clib_memcpy_fast (&last_key6, &key6_0, sizeof(key6_0));
618  tunnel_index0 = last_tunnel_index = p0[0];
619  }
620  else
621  tunnel_index0 = last_tunnel_index;
622  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
623 
624  /* Validate GTPU tunnel encap-fib index against packet */
625  if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
626  {
627  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
628  next0 = GTPU_INPUT_NEXT_DROP;
629  goto trace00;
630  }
631 
632  /* Validate GTPU tunnel SIP against packet DIP */
633  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
634  &t0->src.ip6)))
635  goto next00; /* valid packet */
636  if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
637  {
638  key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
639  key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
640  key6_0.teid = gtpu0->teid;
641  p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
642  if (PREDICT_TRUE (p0 != NULL))
643  {
644  mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
645  goto next00; /* valid packet */
646  }
647  }
648  error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
649  next0 = GTPU_INPUT_NEXT_DROP;
650  goto trace00;
651  }
652 
653  next00:
654  /* Pop gtpu header */
655  vlib_buffer_advance (b0, gtpu_hdr_len0);
656 
657  next0 = t0->decap_next_index;
658  sw_if_index0 = t0->sw_if_index;
659  len0 = vlib_buffer_length_in_chain (vm, b0);
660 
661  /* Required to make the l2 tag push / pop code work on l2 subifs */
662  if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
663  vnet_update_l2_len (b0);
664 
665  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
666  vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
667  sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
668 
669  pkts_decapsulated ++;
670  stats_n_packets += 1;
671  stats_n_bytes += len0;
672 
673  /* Batch stats increment on the same gtpu tunnel so counter
674  is not incremented per packet */
675  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
676  {
677  stats_n_packets -= 1;
678  stats_n_bytes -= len0;
679  if (stats_n_packets)
680  vlib_increment_combined_counter
681  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
682  thread_index, stats_sw_if_index,
683  stats_n_packets, stats_n_bytes);
684  stats_n_packets = 1;
685  stats_n_bytes = len0;
686  stats_sw_if_index = sw_if_index0;
687  }
688 
689  trace00:
690  b0->error = error0 ? node->errors[error0] : 0;
691 
692  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
693  {
694  gtpu_rx_trace_t *tr
695  = vlib_add_trace (vm, node, b0, sizeof (*tr));
696  tr->next_index = next0;
697  tr->error = error0;
698  tr->tunnel_index = tunnel_index0;
699  tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
700  }
701  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
702  to_next, n_left_to_next,
703  bi0, next0);
704  }
705 
706  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
707  }
708  /* Do we still need this now that tunnel tx stats is kept? */
709  vlib_node_increment_counter (vm, is_ip4?
710  gtpu4_input_node.index:gtpu6_input_node.index,
711  GTPU_ERROR_DECAPSULATED,
712  pkts_decapsulated);
713 
714  /* Increment any remaining batch stats */
715  if (stats_n_packets)
716  {
717  vlib_increment_combined_counter
718  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
719  thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
720  node->runtime_data[0] = stats_sw_if_index;
721  }
722 
723  return from_frame->n_vectors;
724 }
725 
726 VLIB_NODE_FN (gtpu4_input_node) (vlib_main_t * vm,
727  vlib_node_runtime_t * node,
728  vlib_frame_t * from_frame)
729 {
730  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 1);
731 }
732 
733 VLIB_NODE_FN (gtpu6_input_node) (vlib_main_t * vm,
734  vlib_node_runtime_t * node,
735  vlib_frame_t * from_frame)
736 {
737  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 0);
738 }
739 
740 static char * gtpu_error_strings[] = {
741 #define gtpu_error(n,s) s,
742 #include <gtpu/gtpu_error.def>
743 #undef gtpu_error
744 #undef _
745 };
746 
747 VLIB_REGISTER_NODE (gtpu4_input_node) = {
748  .name = "gtpu4-input",
749  /* Takes a vector of packets. */
750  .vector_size = sizeof (u32),
751 
752  .n_errors = GTPU_N_ERROR,
753  .error_strings = gtpu_error_strings,
754 
755  .n_next_nodes = GTPU_INPUT_N_NEXT,
756  .next_nodes = {
757 #define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
758  foreach_gtpu_input_next
759 #undef _
760  },
761 
762 //temp .format_buffer = format_gtpu_header,
763  .format_trace = format_gtpu_rx_trace,
764  // $$$$ .unformat_buffer = unformat_gtpu_header,
765 };
766 
767 VLIB_REGISTER_NODE (gtpu6_input_node) = {
768  .name = "gtpu6-input",
769  /* Takes a vector of packets. */
770  .vector_size = sizeof (u32),
771 
772  .n_errors = GTPU_N_ERROR,
773  .error_strings = gtpu_error_strings,
774 
775  .n_next_nodes = GTPU_INPUT_N_NEXT,
776  .next_nodes = {
777 #define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
778  foreach_gtpu_input_next
779 #undef _
780  },
781 
782 //temp .format_buffer = format_gtpu_header,
783  .format_trace = format_gtpu_rx_trace,
784  // $$$$ .unformat_buffer = unformat_gtpu_header,
785 };
786 
787 typedef enum {
788  IP_GTPU_BYPASS_NEXT_DROP,
789  IP_GTPU_BYPASS_NEXT_GTPU,
790  IP_GTPU_BYPASS_N_NEXT,
791 } ip_gtpu_bypass_next_t;
792 
793 always_inline uword
794 ip_gtpu_bypass_inline (vlib_main_t * vm,
795  vlib_node_runtime_t * node,
796  vlib_frame_t * frame,
797  u32 is_ip4)
798 {
799  gtpu_main_t * gtm = &gtpu_main;
800  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
801  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
802  vtep4_key_t last_vtep4; /* last IPv4 address / fib index
803  matching a local VTEP address */
804  vtep6_key_t last_vtep6; /* last IPv6 address / fib index
805  matching a local VTEP address */
806  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
807 
808  from = vlib_frame_vector_args (frame);
809  n_left_from = frame->n_vectors;
810  next_index = node->cached_next_index;
811  vlib_get_buffers (vm, from, bufs, n_left_from);
812 
813  if (node->flags & VLIB_NODE_FLAG_TRACE)
814  ip4_forward_next_trace (vm, node, frame, VLIB_TX);
815 
816  if (is_ip4)
817  vtep4_key_init (&last_vtep4);
818  else
819  vtep6_key_init (&last_vtep6);
820 
821  while (n_left_from > 0)
822  {
823  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
824 
825  while (n_left_from >= 4 && n_left_to_next >= 2)
826  {
827  vlib_buffer_t * b0, * b1;
828  ip4_header_t * ip40, * ip41;
829  ip6_header_t * ip60, * ip61;
830  udp_header_t * udp0, * udp1;
831  u32 bi0, ip_len0, udp_len0, flags0, next0;
832  u32 bi1, ip_len1, udp_len1, flags1, next1;
833  i32 len_diff0, len_diff1;
834  u8 error0, good_udp0, proto0;
835  u8 error1, good_udp1, proto1;
836 
837  /* Prefetch next iteration. */
838  {
839  vlib_prefetch_buffer_header (b[2], LOAD);
840  vlib_prefetch_buffer_header (b[3], LOAD);
841 
842  CLIB_PREFETCH (b[2]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
843  CLIB_PREFETCH (b[3]->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
844  }
845 
846  bi0 = to_next[0] = from[0];
847  bi1 = to_next[1] = from[1];
848  from += 2;
849  n_left_from -= 2;
850  to_next += 2;
851  n_left_to_next -= 2;
852 
853  b0 = b[0];
854  b1 = b[1];
855  b += 2;
856  if (is_ip4)
857  {
858  ip40 = vlib_buffer_get_current (b0);
859  ip41 = vlib_buffer_get_current (b1);
860  }
861  else
862  {
863  ip60 = vlib_buffer_get_current (b0);
864  ip61 = vlib_buffer_get_current (b1);
865  }
866 
867  /* Setup packet for next IP feature */
868  vnet_feature_next(&next0, b0);
869  vnet_feature_next(&next1, b1);
870 
871  if (is_ip4)
872  {
873  /* Treat IP frag packets as "experimental" protocol for now
874  until support of IP frag reassembly is implemented */
875  proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
876  proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
877  }
878  else
879  {
880  proto0 = ip60->protocol;
881  proto1 = ip61->protocol;
882  }
883 
884  /* Process packet 0 */
885  if (proto0 != IP_PROTOCOL_UDP)
886  goto exit0; /* not UDP packet */
887 
888  if (is_ip4)
889  udp0 = ip4_next_header (ip40);
890  else
891  udp0 = ip6_next_header (ip60);
892 
893  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
894  goto exit0; /* not GTPU packet */
895 
896  /* Validate DIP against VTEPs */
897  if (is_ip4)
898  {
899 #ifdef CLIB_HAVE_VEC512
900  if (!vtep4_check_vector (&gtm->vtep_table, b0, ip40, &last_vtep4,
901  &gtm->vtep4_u512))
902 #else
903  if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
904 #endif
905  goto exit0; /* no local VTEP for GTPU packet */
906  }
907  else
908  {
909  if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
910  goto exit0; /* no local VTEP for GTPU packet */
911  }
912 
913  flags0 = b0->flags;
914  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
915 
916  /* Don't verify UDP checksum for packets with explicit zero checksum. */
917  good_udp0 |= udp0->checksum == 0;
918 
919  /* Verify UDP length */
920  if (is_ip4)
921  ip_len0 = clib_net_to_host_u16 (ip40->length);
922  else
923  ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
924  udp_len0 = clib_net_to_host_u16 (udp0->length);
925  len_diff0 = ip_len0 - udp_len0;
926 
927  /* Verify UDP checksum */
928  if (PREDICT_FALSE (!good_udp0))
929  {
930  if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
931  {
932  if (is_ip4)
933  flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
934  else
935  flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
936  good_udp0 =
937  (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
938  }
939  }
940 
941  if (is_ip4)
942  {
943  error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
944  error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
945  }
946  else
947  {
948  error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
949  error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
950  }
951 
952  next0 = error0 ?
953  IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
954  b0->error = error0 ? error_node->errors[error0] : 0;
955 
956  /* gtpu-input node expects current_data at the GTPU header */
957  if (is_ip4)
958  vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
959  else
960  vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
961 
962  exit0:
963  /* Process packet 1 */
964  if (proto1 != IP_PROTOCOL_UDP)
965  goto exit1; /* not UDP packet */
966 
967  if (is_ip4)
968  udp1 = ip4_next_header (ip41);
969  else
970  udp1 = ip6_next_header (ip61);
971 
972  if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
973  goto exit1; /* not GTPU packet */
974 
975  /* Validate DIP against VTEPs */
976  if (is_ip4)
977  {
978 #ifdef CLIB_HAVE_VEC512
979  if (!vtep4_check_vector (&gtm->vtep_table, b1, ip41, &last_vtep4,
980  &gtm->vtep4_u512))
981 #else
982  if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
983 #endif
984  goto exit1; /* no local VTEP for GTPU packet */
985  }
986  else
987  {
988  if (!vtep6_check (&gtm->vtep_table, b1, ip61, &last_vtep6))
989  goto exit1; /* no local VTEP for GTPU packet */
990  }
991 
992  flags1 = b1->flags;
993  good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
994 
995  /* Don't verify UDP checksum for packets with explicit zero checksum. */
996  good_udp1 |= udp1->checksum == 0;
997 
998  /* Verify UDP length */
999  if (is_ip4)
1000  ip_len1 = clib_net_to_host_u16 (ip41->length);
1001  else
1002  ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
1003  udp_len1 = clib_net_to_host_u16 (udp1->length);
1004  len_diff1 = ip_len1 - udp_len1;
1005 
1006  /* Verify UDP checksum */
1007  if (PREDICT_FALSE (!good_udp1))
1008  {
1009  if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1010  {
1011  if (is_ip4)
1012  flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
1013  else
1014  flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
1015  good_udp1 =
1016  (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1017  }
1018  }
1019 
1020  if (is_ip4)
1021  {
1022  error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1023  error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
1024  }
1025  else
1026  {
1027  error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1028  error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
1029  }
1030 
1031  next1 = error1 ?
1032  IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
1033  b1->error = error1 ? error_node->errors[error1] : 0;
1034 
1035  /* gtpu-input node expects current_data at the GTPU header */
1036  if (is_ip4)
1037  vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
1038  else
1039  vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));
1040 
1041  exit1:
1042  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1043  to_next, n_left_to_next,
1044  bi0, bi1, next0, next1);
1045  }
1046 
1047  while (n_left_from > 0 && n_left_to_next > 0)
1048  {
1049  vlib_buffer_t * b0;
1050  ip4_header_t * ip40;
1051  ip6_header_t * ip60;
1052  udp_header_t * udp0;
1053  u32 bi0, ip_len0, udp_len0, flags0, next0;
1054  i32 len_diff0;
1055  u8 error0, good_udp0, proto0;
1056 
1057  bi0 = to_next[0] = from[0];
1058  from += 1;
1059  n_left_from -= 1;
1060  to_next += 1;
1061  n_left_to_next -= 1;
1062 
1063  b0 = b[0];
1064  b++;
1065  if (is_ip4)
1066  ip40 = vlib_buffer_get_current (b0);
1067  else
1068  ip60 = vlib_buffer_get_current (b0);
1069 
1070  /* Setup packet for next IP feature */
1071  vnet_feature_next(&next0, b0);
1072 
1073  if (is_ip4)
1074  /* Treat IP4 frag packets as "experimental" protocol for now
1075  until support of IP frag reassembly is implemented */
1076  proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
1077  else
1078  proto0 = ip60->protocol;
1079 
1080  if (proto0 != IP_PROTOCOL_UDP)
1081  goto exit; /* not UDP packet */
1082 
1083  if (is_ip4)
1084  udp0 = ip4_next_header (ip40);
1085  else
1086  udp0 = ip6_next_header (ip60);
1087 
1088  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
1089  goto exit; /* not GTPU packet */
1090 
1091  /* Validate DIP against VTEPs */
1092  if (is_ip4)
1093  {
1094 #ifdef CLIB_HAVE_VEC512
1095  if (!vtep4_check_vector (&gtm->vtep_table, b0, ip40, &last_vtep4,
1096  &gtm->vtep4_u512))
1097 #else
1098  if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
1099 #endif
1100  goto exit; /* no local VTEP for GTPU packet */
1101  }
1102  else
1103  {
1104  if (!vtep6_check (&gtm->vtep_table, b0, ip60, &last_vtep6))
1105  goto exit; /* no local VTEP for GTPU packet */
1106  }
1107 
1108  flags0 = b0->flags;
1109  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1110 
1111  /* Don't verify UDP checksum for packets with explicit zero checksum. */
1112  good_udp0 |= udp0->checksum == 0;
1113 
1114  /* Verify UDP length */
1115  if (is_ip4)
1116  ip_len0 = clib_net_to_host_u16 (ip40->length);
1117  else
1118  ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1119  udp_len0 = clib_net_to_host_u16 (udp0->length);
1120  len_diff0 = ip_len0 - udp_len0;
1121 
1122  /* Verify UDP checksum */
1123  if (PREDICT_FALSE (!good_udp0))
1124  {
1125  if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1126  {
1127  if (is_ip4)
1128  flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1129  else
1130  flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1131  good_udp0 =
1132  (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1133  }
1134  }
1135 
1136  if (is_ip4)
1137  {
1138  error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1139  error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1140  }
1141  else
1142  {
1143  error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1144  error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
1145  }
1146 
1147  next0 = error0 ?
1148  IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
1149  b0->error = error0 ? error_node->errors[error0] : 0;
1150 
1151  /* gtpu-input node expects current_data at the GTPU header */
1152  if (is_ip4)
1153  vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
1154  else
1155  vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
1156 
1157  exit:
1158  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1159  to_next, n_left_to_next,
1160  bi0, next0);
1161  }
1162 
1163  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1164  }
1165 
1166  return frame->n_vectors;
1167 }
1168 
1169 VLIB_NODE_FN (ip4_gtpu_bypass_node) (vlib_main_t * vm,
1170  vlib_node_runtime_t * node,
1171  vlib_frame_t * frame)
1172 {
1173  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
1174 }
1175 
1176 VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
1177  .name = "ip4-gtpu-bypass",
1178  .vector_size = sizeof (u32),
1179 
1180  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
1181  .next_nodes = {
1182  [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
1183  [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
1184  },
1185 
1186  .format_buffer = format_ip4_header,
1187  .format_trace = format_ip4_forward_next_trace,
1188 };
1189 
1190 #ifndef CLIB_MARCH_VARIANT
1191 /* Dummy init function to get us linked in. */
1192 clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
1193 { return 0; }
1194 
1195 VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);
1196 #endif /* CLIB_MARCH_VARIANT */
1197 
1198 VLIB_NODE_FN (ip6_gtpu_bypass_node) (vlib_main_t * vm,
1199  vlib_node_runtime_t * node,
1200  vlib_frame_t * frame)
1201 {
1202  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
1203 }
1204 
1205 VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
1206  .name = "ip6-gtpu-bypass",
1207  .vector_size = sizeof (u32),
1208 
1209  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
1210  .next_nodes = {
1211  [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
1212  [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
1213  },
1214 
1215  .format_buffer = format_ip6_header,
1216  .format_trace = format_ip6_forward_next_trace,
1217 };
1218 
1219 #ifndef CLIB_MARCH_VARIANT
1220 /* Dummy init function to get us linked in. */
1221 clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
1222 { return 0; }
1223 
1224 VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);
1225 
1226 #define foreach_gtpu_flow_error \
1227  _(NONE, "no error") \
1228  _(PAYLOAD_ERROR, "Payload type errors") \
1229  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
1230  _(IP_HEADER_ERROR, "Rx ip header errors") \
1231  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
1232  _(UDP_LENGTH_ERROR, "Rx udp length errors")
1233 
1234 typedef enum
1235 {
1236 #define _(f,s) GTPU_FLOW_ERROR_##f,
1237  foreach_gtpu_flow_error
1238 #undef _
1239 #define gtpu_error(n,s) GTPU_FLOW_ERROR_##n,
1240 #include <gtpu/gtpu_error.def>
1241 #undef gtpu_error
1242  GTPU_FLOW_N_ERROR,
1243 } gtpu_flow_error_t;
1244 
1245 static char *gtpu_flow_error_strings[] = {
1246 #define _(n,s) s,
1247  foreach_gtpu_flow_error
1248 #undef _
1249 #define gtpu_error(n,s) s,
1250 #include <gtpu/gtpu_error.def>
1251 #undef gtpu_error
1252 #undef _
1253 
1254 };
1255 
1256 #define gtpu_local_need_csum_check(_b) \
1257  (!(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED || \
1258  (_b->flags & VNET_BUFFER_F_OFFLOAD && \
1259  vnet_buffer (_b)->oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)))
1260 
1261 #define gtpu_local_csum_is_valid(_b) \
1262  ((_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT || \
1263  (_b->flags & VNET_BUFFER_F_OFFLOAD && \
1264  vnet_buffer (_b)->oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)) != 0)
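/* Note: the two macros above treat a UDP checksum as already handled when a
 * previous node verified it (VNET_BUFFER_F_L4_CHECKSUM_COMPUTED/_CORRECT) or
 * when the packet carries the NIC's UDP-checksum offload flag; only the
 * remaining packets need the software validation below. */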
1265 
1266 static_always_inline u8
1267 gtpu_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t *b)
1268 {
1269  u32 flags = b->flags;
1270  enum { offset = sizeof(ip4_header_t) + sizeof(udp_header_t)};
1271 
1272  /* Verify UDP checksum */
1273  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1274  {
1275  vlib_buffer_advance (b, -offset);
1276  flags = ip4_tcp_udp_validate_checksum (vm, b);
1277  vlib_buffer_advance (b, offset);
1278  }
1279 
1280  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1281 }
1282 
1283 static_always_inline u8
1284 gtpu_check_ip (vlib_buffer_t *b, u16 payload_len)
1285 {
1286  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
1287  sizeof(ip4_header_t) - sizeof(udp_header_t);
1288  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
1289  u16 expected = payload_len + sizeof(ip4_header_t) + sizeof(udp_header_t);
1290  return ip_len > expected || ip4_hdr->ttl == 0 || ip4_hdr->ip_version_and_header_length != 0x45;
1291 }
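/* Note: gtpu_check_ip above flags a packet whose outer IPv4 header claims
 * more data than the buffer holds (ip_len > payload_len plus the 28 bytes of
 * IPv4 + UDP headers), whose TTL is already zero, or whose version/IHL byte
 * is not 0x45 (IPv4 with a 20-byte header, i.e. no IP options). */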
1292 
1293 static_always_inline u8
1294 gtpu_check_ip_udp_len (vlib_buffer_t *b)
1295 {
1296  ip4_header_t * ip4_hdr = vlib_buffer_get_current(b) -
1297  sizeof(ip4_header_t) - sizeof(udp_header_t);
1298  udp_header_t * udp_hdr = vlib_buffer_get_current(b) - sizeof(udp_header_t);
1299  u16 ip_len = clib_net_to_host_u16 (ip4_hdr->length);
1300  u16 udp_len = clib_net_to_host_u16 (udp_hdr->length);
1301  return udp_len > ip_len;
1302 }
1303 
1304 static_always_inline u8
1305 gtpu_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
1306 {
1307  u8 error0 = GTPU_FLOW_ERROR_NONE;
1308  if (ip_err0)
1309  error0 = GTPU_FLOW_ERROR_IP_HEADER_ERROR;
1310  if (udp_err0)
1311  error0 = GTPU_FLOW_ERROR_UDP_LENGTH_ERROR;
1312  if (csum_err0)
1313  error0 = GTPU_FLOW_ERROR_UDP_CHECKSUM_ERROR;
1314  return error0;
1315 }
1316 
1317 
1318 static_always_inline uword
1319 gtpu_flow_input (vlib_main_t * vm,
1320  vlib_node_runtime_t * node,
1321  vlib_frame_t * from_frame)
1322 {
1323  u32 n_left_from, next_index, * from, * to_next;
1324  gtpu_main_t * gtm = &gtpu_main;
1325  vnet_main_t * vnm = gtm->vnet_main;
1326  vnet_interface_main_t * im = &vnm->interface_main;
1327  u32 pkts_decapsulated = 0;
1328  u32 thread_index = vlib_get_thread_index();
1329  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
1330  u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;
1331 
1332  from = vlib_frame_vector_args (from_frame);
1333  n_left_from = from_frame->n_vectors;
1334 
1335  next_index = node->cached_next_index;
1336  stats_sw_if_index = node->runtime_data[0];
1337  stats_n_packets = stats_n_bytes = 0;
1338 
1339  while (n_left_from > 0)
1340  {
1341  u32 n_left_to_next;
1342 
1343  vlib_get_next_frame (vm, node, next_index,
1344  to_next, n_left_to_next);
1345 
1346  while (n_left_from >= 4 && n_left_to_next >= 2)
1347  {
1348  u32 bi0, bi1;
1349  vlib_buffer_t * b0, * b1;
1350  u32 next0, next1;
1351  gtpu_header_t * gtpu0, * gtpu1;
1352  u32 gtpu_hdr_len0, gtpu_hdr_len1;
1353  u32 tunnel_index0, tunnel_index1;
1354  gtpu_tunnel_t * t0, * t1;
1355  u32 error0, error1;
1356  u32 sw_if_index0, sw_if_index1, len0, len1;
1357  u8 has_space0 = 0, has_space1 = 0;
1358  u8 ver0, ver1;
1359 
1360  /* Prefetch next iteration. */
1361  {
1362  vlib_buffer_t * p2, * p3;
1363 
1364  p2 = vlib_get_buffer (vm, from[2]);
1365  p3 = vlib_get_buffer (vm, from[3]);
1366 
1367  vlib_prefetch_buffer_header (p2, LOAD);
1368  vlib_prefetch_buffer_header (p3, LOAD);
1369 
1370  CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
1371  CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
1372  }
1373 
1374  bi0 = from[0];
1375  bi1 = from[1];
1376  to_next[0] = bi0;
1377  to_next[1] = bi1;
1378  from += 2;
1379  to_next += 2;
1380  n_left_to_next -= 2;
1381  n_left_from -= 2;
1382 
1383  b0 = vlib_get_buffer (vm, bi0);
1384  b1 = vlib_get_buffer (vm, bi1);
1385 
1386  /* udp leaves current_data pointing at the gtpu header */
1387  gtpu0 = vlib_buffer_get_current (b0);
1388  gtpu1 = vlib_buffer_get_current (b1);
1389 
1390  len0 = vlib_buffer_length_in_chain (vm, b0);
1391  len1 = vlib_buffer_length_in_chain (vm, b1);
1392 
1393  tunnel_index0 = ~0;
1394  error0 = 0;
1395 
1396  tunnel_index1 = ~0;
1397  error1 = 0;
1398 
1399  ip_err0 = gtpu_check_ip (b0, len0);
1400  udp_err0 = gtpu_check_ip_udp_len (b0);
1401  ip_err1 = gtpu_check_ip (b1, len1);
1402  udp_err1 = gtpu_check_ip_udp_len (b1);
1403 
1404  if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
1405  csum_err0 = !gtpu_validate_udp_csum (vm, b0);
1406  else
1407  csum_err0 = !gtpu_local_csum_is_valid (b0);
1408  if (PREDICT_FALSE (gtpu_local_need_csum_check (b1)))
1409  csum_err1 = !gtpu_validate_udp_csum (vm, b1);
1410  else
1411  csum_err1 = !gtpu_local_csum_is_valid (b1);
1412 
1413  if (ip_err0 || udp_err0 || csum_err0)
1414  {
1415  next0 = GTPU_INPUT_NEXT_DROP;
1416  error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
1417  goto trace0;
1418  }
1419 
1420  /* speculatively load gtp header version field */
1421  ver0 = gtpu0->ver_flags;
1422 
1423  /*
1424  * Manipulate gtpu header
1425  * TBD: Manipulate Sequence Number and N-PDU Number
1426  * TBD: Manipulate Next Extension Header
1427  */
1428  gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
1429 
1430  has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
1431  if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
1432  {
1433  error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
1434  next0 = GTPU_INPUT_NEXT_DROP;
1435  goto trace0;
1436  }
1437 
1438  /* Manipulate packet 0 */
1439  ASSERT (b0->flow_id != 0);
1440  tunnel_index0 = b0->flow_id - gtm->flow_id_start;
1441  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
1442  b0->flow_id = 0;
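/* Note: in this flow-offload path the NIC has already classified the packet,
 * so the tunnel is recovered directly from b0->flow_id (offset by
 * gtm->flow_id_start) rather than by a hash lookup on SIP + TEID; the id is
 * then cleared, presumably so a stale value is not reused downstream. */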
1443 
1444  /* Pop gtpu header */
1445  vlib_buffer_advance (b0, gtpu_hdr_len0);
1446 
1447  /* assign the next node */
1448  if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
1449  (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
1450  {
1451  error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
1452  next0 = GTPU_INPUT_NEXT_DROP;
1453  goto trace0;
1454  }
1455  next0 = t0->decap_next_index;
1456 
1457  sw_if_index0 = t0->sw_if_index;
1458 
1459  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
1460  vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
1461 
1462  pkts_decapsulated ++;
1463  stats_n_packets += 1;
1464  stats_n_bytes += len0;
1465 
1466  /* Batch stats increment on the same gtpu tunnel so counter
1467  is not incremented per packet */
1468  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
1469  {
1470  stats_n_packets -= 1;
1471  stats_n_bytes -= len0;
1472  if (stats_n_packets)
1473  vlib_increment_combined_counter
1474  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1475  thread_index, stats_sw_if_index,
1476  stats_n_packets, stats_n_bytes);
1477  stats_n_packets = 1;
1478  stats_n_bytes = len0;
1479  stats_sw_if_index = sw_if_index0;
1480  }
1481 
1482 trace0:
1483  b0->error = error0 ? node->errors[error0] : 0;
1484 
1485  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1486  {
1487  gtpu_rx_trace_t *tr
1488  = vlib_add_trace (vm, node, b0, sizeof (*tr));
1489  tr->next_index = next0;
1490  tr->error = error0;
1491  tr->tunnel_index = tunnel_index0;
1492  tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
1493  }
1494 
1495  if (ip_err1 || udp_err1 || csum_err1)
1496  {
1497  next1 = GTPU_INPUT_NEXT_DROP;
1498  error1 = gtpu_err_code (ip_err1, udp_err1, csum_err1);
1499  goto trace1;
1500  }
1501 
1502  /* speculatively load gtp header version field */
1503  ver1 = gtpu1->ver_flags;
1504 
1505  /*
1506  * Manipulate gtpu header
1507  * TBD: Manipulate Sequence Number and N-PDU Number
1508  * TBD: Manipulate Next Extension Header
1509  */
1510  gtpu_hdr_len1 = sizeof(gtpu_header_t) - (((ver1 & GTPU_E_S_PN_BIT) == 0) * 4);
1511  has_space1 = vlib_buffer_has_space (b1, gtpu_hdr_len1);
1512  if (PREDICT_FALSE (((ver1 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space1)))
1513  {
1514  error1 = has_space1 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
1515  next1 = GTPU_INPUT_NEXT_DROP;
1516  goto trace1;
1517  }
1518 
1519  /* Manipulate packet 1 */
1520  ASSERT (b1->flow_id != 0);
1521  tunnel_index1 = b1->flow_id - gtm->flow_id_start;
1522  t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);
1523  b1->flow_id = 0;
1524 
1525  /* Pop gtpu header */
1526  vlib_buffer_advance (b1, gtpu_hdr_len1);
1527 
1528  /* assign the next node */
1529  if (PREDICT_FALSE (t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
1530  (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
1531  {
1532  error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
1533  next1 = GTPU_INPUT_NEXT_DROP;
1534  goto trace1;
1535  }
1536  next1 = t1->decap_next_index;
1537 
1538  sw_if_index1 = t1->sw_if_index;
1539 
1540  /* Required to make the l2 tag push / pop code work on l2 subifs */
1541  /* This won't happen in current implementation as only
1542  ipv4/udp/gtpu/IPV4 type packets can be matched */
1543  if (PREDICT_FALSE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
1544  vnet_update_l2_len (b1);
1545 
1546  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
1547  vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
1548 
1549  pkts_decapsulated ++;
1550  stats_n_packets += 1;
1551  stats_n_bytes += len1;
1552 
1553  /* Batch stats increment on the same gtpu tunnel so counter
1554  is not incremented per packet */
1555  if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
1556  {
1557  stats_n_packets -= 1;
1558  stats_n_bytes -= len1;
1559  if (stats_n_packets)
1560  vlib_increment_combined_counter
1561  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1562  thread_index, stats_sw_if_index,
1563  stats_n_packets, stats_n_bytes);
1564  stats_n_packets = 1;
1565  stats_n_bytes = len1;
1566  stats_sw_if_index = sw_if_index1;
1567  }
1568 
1569 trace1:
1570  b1->error = error1 ? node->errors[error1] : 0;
1571 
1572  if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
1573  {
1574  gtpu_rx_trace_t *tr
1575  = vlib_add_trace (vm, node, b1, sizeof (*tr));
1576  tr->next_index = next1;
1577  tr->error = error1;
1578  tr->tunnel_index = tunnel_index1;
1579  tr->teid = has_space1 ? clib_net_to_host_u32(gtpu1->teid) : ~0;
1580  }
1581 
1582  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1583  to_next, n_left_to_next,
1584  bi0, bi1, next0, next1);
1585  }
1586 
1587  while (n_left_from > 0 && n_left_to_next > 0)
1588  {
1589  u32 bi0;
1590  vlib_buffer_t * b0;
1591  u32 next0;
1592  gtpu_header_t * gtpu0;
1593  u32 gtpu_hdr_len0;
1594  u32 error0;
1595  u32 tunnel_index0;
1596  gtpu_tunnel_t * t0;
1597  u32 sw_if_index0, len0;
1598  u8 has_space0 = 0;
1599  u8 ver0;
1600 
1601  bi0 = from[0];
1602  to_next[0] = bi0;
1603  from += 1;
1604  to_next += 1;
1605  n_left_from -= 1;
1606  n_left_to_next -= 1;
1607 
1608  b0 = vlib_get_buffer (vm, bi0);
1609  len0 = vlib_buffer_length_in_chain (vm, b0);
1610 
1611  tunnel_index0 = ~0;
1612  error0 = 0;
1613 
1614  ip_err0 = gtpu_check_ip (b0, len0);
1615  udp_err0 = gtpu_check_ip_udp_len (b0);
1616  if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
1617  csum_err0 = !gtpu_validate_udp_csum (vm, b0);
1618  else
1619  csum_err0 = !gtpu_local_csum_is_valid (b0);
1620 
1621  if (ip_err0 || udp_err0 || csum_err0)
1622  {
1623  next0 = GTPU_INPUT_NEXT_DROP;
1624  error0 = gtpu_err_code (ip_err0, udp_err0, csum_err0);
1625  goto trace00;
1626  }
1627 
1628  /* udp leaves current_data pointing at the gtpu header */
1629  gtpu0 = vlib_buffer_get_current (b0);
1630 
1631  /* speculatively load gtp header version field */
1632  ver0 = gtpu0->ver_flags;
1633 
1634  /*
1635  * Manipulate gtpu header
1636  * TBD: Manipulate Sequence Number and N-PDU Number
1637  * TBD: Manipulate Next Extension Header
1638  */
1639  gtpu_hdr_len0 = sizeof(gtpu_header_t) - (((ver0 & GTPU_E_S_PN_BIT) == 0) * 4);
1640 
1641  has_space0 = vlib_buffer_has_space (b0, gtpu_hdr_len0);
1642  if (PREDICT_FALSE (((ver0 & GTPU_VER_MASK) != GTPU_V1_VER) | (!has_space0)))
1643  {
1644  error0 = has_space0 ? GTPU_ERROR_BAD_VER : GTPU_ERROR_TOO_SMALL;
1645  next0 = GTPU_INPUT_NEXT_DROP;
1646  goto trace00;
1647  }
1648 
1649  ASSERT (b0->flow_id != 0);
1650  tunnel_index0 = b0->flow_id - gtm->flow_id_start;
1651  t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);
1652  b0->flow_id = 0;
1653 
1654  /* Pop gtpu header */
1655  vlib_buffer_advance (b0, gtpu_hdr_len0);
1656 
1657  /* assign the next node */
1658  if (PREDICT_FALSE (t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
1659  (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
1660  {
1661  error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
1662  next0 = GTPU_INPUT_NEXT_DROP;
1663  goto trace00;
1664  }
1665  next0 = t0->decap_next_index;
1666 
1667  sw_if_index0 = t0->sw_if_index;
1668 
1669  /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
1670  vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
1671 
1672  pkts_decapsulated ++;
1673  stats_n_packets += 1;
1674  stats_n_bytes += len0;
1675 
1676  /* Batch stats increment on the same gtpu tunnel so counter
1677  is not incremented per packet */
1678  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
1679  {
1680  stats_n_packets -= 1;
1681  stats_n_bytes -= len0;
1682  if (stats_n_packets)
1683  vlib_increment_combined_counter
1684  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1685  thread_index, stats_sw_if_index,
1686  stats_n_packets, stats_n_bytes);
1687  stats_n_packets = 1;
1688  stats_n_bytes = len0;
1689  stats_sw_if_index = sw_if_index0;
1690  }
1691  trace00:
1692  b0->error = error0 ? node->errors[error0] : 0;
1693 
1694  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1695  {
1696  gtpu_rx_trace_t *tr
1697  = vlib_add_trace (vm, node, b0, sizeof (*tr));
1698  tr->next_index = next0;
1699  tr->error = error0;
1700  tr->tunnel_index = tunnel_index0;
1701  tr->teid = has_space0 ? clib_net_to_host_u32(gtpu0->teid) : ~0;
1702  }
1703  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1704  to_next, n_left_to_next,
1705  bi0, next0);
1706  }
1707 
1708  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1709  }
1710 
1711  /* Do we still need this now that tunnel tx stats is kept? */
1712  vlib_node_increment_counter (vm, gtpu4_flow_input_node.index,
1713  GTPU_ERROR_DECAPSULATED,
1714  pkts_decapsulated);
1715 
1716  /* Increment any remaining batch stats */
1717  if (stats_n_packets)
1718  {
1719  vlib_increment_combined_counter
1720  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
1721  thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
1722  node->runtime_data[0] = stats_sw_if_index;
1723  }
1724 
1725  return from_frame->n_vectors;
1726 }
1727 
1728 VLIB_NODE_FN (gtpu4_flow_input_node) (vlib_main_t * vm,
1729  vlib_node_runtime_t * node,
1730  vlib_frame_t * from_frame)
1731 {
1732  return gtpu_flow_input(vm, node, from_frame);
1733 }
1734 
1735 
1736 /* *INDENT-OFF* */
1737 #ifndef CLIB_MULTIARCH_VARIANT
1738 VLIB_REGISTER_NODE (gtpu4_flow_input_node) = {
1739  .name = "gtpu4-flow-input",
1740  .type = VLIB_NODE_TYPE_INTERNAL,
1741  .vector_size = sizeof (u32),
1742 
1743  .format_trace = format_gtpu_rx_trace,
1744 
1745  .n_errors = GTPU_FLOW_N_ERROR,
1746  .error_strings = gtpu_flow_error_strings,
1747 
1748  .n_next_nodes = GTPU_INPUT_N_NEXT,
1749  .next_nodes = {
1750 #define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
1751  foreach_gtpu_input_next
1752 #undef _
1753 
1754  },
1755 };
1756 #endif
1757 /* *INDENT-ON* */
1758 
1759 #endif /* CLIB_MARCH_VARIANT */
vlib.h
ip6_gtpu_bypass_node
vlib_node_registration_t ip6_gtpu_bypass_node
(constructor) VLIB_REGISTER_NODE (ip6_gtpu_bypass_node)
Definition: gtpu_decap.c:1205
ip6_address_is_equal
static uword ip6_address_is_equal(const ip6_address_t *a, const ip6_address_t *b)
Definition: ip6_packet.h:167
im
vnet_interface_main_t * im
Definition: interface_output.c:415
udp_header_t::length
u16 length
Definition: udp_packet.h:51
ip6_tcp_udp_icmp_validate_checksum
u32 ip6_tcp_udp_icmp_validate_checksum(vlib_main_t *vm, vlib_buffer_t *p0)
Definition: ip6_forward.c:1161
thread_index
u32 thread_index
Definition: nat44_ei_hairpinning.c:495
bufs
vlib_buffer_t * bufs[VLIB_FRAME_SIZE]
Definition: nat44_ei_out2in.c:717
gtpu_header_t::ver_flags
u8 ver_flags
Definition: mobile.h:92
frame
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: nat44_ei.c:3048
vlib_prefetch_buffer_header
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:231
foreach_gtpu_flow_error
#define foreach_gtpu_flow_error
Definition: gtpu_decap.c:1226
gtpu.h
ip4_forward_next_trace
void ip4_forward_next_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vlib_rx_or_tx_t which_adj_index)
Definition: ip4_forward.c:1239
next_index
nat44_ei_hairpin_src_next_t next_index
Definition: nat44_ei_hairpinning.c:412
gtpu4_flow_input_node
vlib_node_registration_t gtpu4_flow_input_node
(constructor) VLIB_REGISTER_NODE (gtpu4_flow_input_node)
Definition: gtpu_decap.c:1738
gtpu_main_t
Definition: gtpu.h:205
vlib_get_buffer
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
format_ip6_header
format_function_t format_ip6_header
Definition: format.h:95
vtep4_key_init
static void vtep4_key_init(vtep4_key_t *k4)
Definition: vtep.h:80
pool_elt_at_index
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:549
ip4_gtpu_bypass_node
vlib_node_registration_t ip4_gtpu_bypass_node
(constructor) VLIB_REGISTER_NODE (ip4_gtpu_bypass_node)
Definition: gtpu_decap.c:1176
ip6_header_t::protocol
u8 protocol
Definition: ip6_packet.h:304
vlib_get_buffers
vlib_get_buffers(vm, from, b, n_left_from)
gtpu_main_t::vnet_main
vnet_main_t * vnet_main
Definition: gtpu.h:237
VLIB_NODE_TYPE_INTERNAL
@ VLIB_NODE_TYPE_INTERNAL
Definition: node.h:72
VLIB_FRAME_SIZE
#define VLIB_FRAME_SIZE
Definition: node.h:368
foreach_gtpu_input_next
#define foreach_gtpu_input_next
Definition: gtpu.h:183
node
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
IP_GTPU_BYPASS_NEXT_GTPU
@ IP_GTPU_BYPASS_NEXT_GTPU
Definition: gtpu_decap.c:789
ip4_address_t::as_u32
u32 as_u32
Definition: ip4_packet.h:57
vnet_interface_main_t
Definition: interface.h:990
gtpu_main_t::vtep_table
vtep_table_t vtep_table
Definition: gtpu.h:216
u16
unsigned short u16
Definition: types.h:57
gtpu_rx_trace_t::teid
u32 teid
Definition: gtpu_decap.c:28
vm
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
VLIB_RX
@ VLIB_RX
Definition: defs.h:46
from_frame
vlib_main_t vlib_node_runtime_t vlib_frame_t * from_frame
Definition: esp_encrypt.c:1328
gtpu_rx_trace_t::error
u32 error
Definition: gtpu_decap.c:27
ip6_next_header
static void * ip6_next_header(ip6_header_t *i)
Definition: ip6_packet.h:407
vlib_frame_t
Definition: node.h:372
gtpu_rx_trace_t::tunnel_index
u32 tunnel_index
Definition: gtpu_decap.c:26
vlib_buffer_length_in_chain
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:433
clib_memcpy_fast
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
Definition: string.h:92
udp_header_t
Definition: udp_packet.h:45
ip4_header_t
Definition: ip4_packet.h:87
IP_GTPU_BYPASS_N_NEXT
@ IP_GTPU_BYPASS_N_NEXT
Definition: gtpu_decap.c:790
format_ip4_forward_next_trace
u8 * format_ip4_forward_next_trace(u8 *s, va_list *args)
Definition: ip4_forward.c:1189
ip4_header_t::length
u16 length
Definition: ip4_packet.h:99
i32
signed int i32
Definition: types.h:77
gtpu_tunnel_t::src
ip46_address_t src
Definition: gtpu.h:144
vlib_buffer_has_space
static u8 vlib_buffer_has_space(vlib_buffer_t *b, word l)
Check if there is enough space in buffer to advance.
Definition: buffer.h:293
CLIB_PREFETCH
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:76
gtpu_header_t::teid
u32 teid
Definition: mobile.h:95
gtpu_main_t::flow_id_start
u32 flow_id_start
Definition: gtpu.h:238
vlib_node_runtime_t::errors
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:460
vlib_buffer_advance
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:276
ip4_address_is_multicast
static uword ip4_address_is_multicast(const ip4_address_t *a)
Definition: ip4_packet.h:446
gtpu_tunnel_t::sw_if_index
u32 sw_if_index
Definition: gtpu.h:157
gtpu6_input_node
vlib_node_registration_t gtpu6_input_node
(constructor) VLIB_REGISTER_NODE (gtpu6_input_node)
Definition: gtpu_decap.c:767
ip6_address_is_multicast
static uword ip6_address_is_multicast(const ip6_address_t *a)
Definition: ip6_packet.h:121
vlib_buffer_t::error
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:145
ip4_gtpu_bypass_init
clib_error_t * ip4_gtpu_bypass_init(vlib_main_t *vm)
Definition: gtpu_decap.c:1192
vlib_buffer_get_ip_fib_index
static u32 vlib_buffer_get_ip_fib_index(vlib_buffer_t *b, u8 is_ip4)
Definition: ip.h:294
VLIB_NODE_FN
#define VLIB_NODE_FN(node)
Definition: node.h:202
GTPU_V1_VER
#define GTPU_V1_VER
Definition: mobile.h:162
gtpu_flow_error_strings
static char * gtpu_flow_error_strings[]
Definition: gtpu_decap.c:1245
ip4_is_fragment
static int ip4_is_fragment(const ip4_header_t *i)
Definition: ip4_packet.h:168
CLIB_UNUSED
#define CLIB_UNUSED(x)
Definition: clib.h:90
vnet_buffer
#define vnet_buffer(b)
Definition: buffer.h:441
VLIB_NODE_FLAG_TRACE
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:291
PREDICT_FALSE
#define PREDICT_FALSE(x)
Definition: clib.h:124
vlib_get_thread_index
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:187
hash_get
#define hash_get(h, key)
Definition: hash.h:249
vnet_feature_next
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:322
vlib_frame_vector_args
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:301
gtpu_tunnel_t::decap_next_index
u32 decap_next_index
Definition: gtpu.h:151
static_always_inline
#define static_always_inline
Definition: clib.h:112
gtpu_check_ip_udp_len
static_always_inline u8 gtpu_check_ip_udp_len(vlib_buffer_t *b)
Definition: gtpu_decap.c:1294
gtpu_header_t
Definition: mobile.h:90
uword
u64 uword
Definition: types.h:112
if
if(node->flags &VLIB_NODE_FLAG_TRACE) vnet_interface_output_trace(vm
GTPU_INPUT_N_NEXT
@ GTPU_INPUT_N_NEXT
Definition: gtpu.h:194
vlib_node_increment_counter
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1244
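Error counters are bumped once per frame rather than per packet; a sketch assuming the DECAPSULATED enumerator from gtpu_error.def:
  vlib_node_increment_counter (vm,
                               is_ip4 ? gtpu4_input_node.index
                                      : gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED, pkts_decapsulated);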
ip6_header_t::dst_address
ip6_address_t dst_address
Definition: ip6_packet.h:310
vlib_buffer_t::flow_id
u32 flow_id
Generic flow identifier.
Definition: buffer.h:136
VNET_INTERFACE_COUNTER_RX
@ VNET_INTERFACE_COUNTER_RX
Definition: interface.h:915
CLIB_CACHE_LINE_BYTES
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:58
vlib_node_registration_t
struct _vlib_node_registration vlib_node_registration_t
vnet_interface_main_t::combined_sw_if_counters
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:1024
ip4_header_t::dst_address
ip4_address_t dst_address
Definition: ip4_packet.h:125
gtpu_input
static uword gtpu_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, u32 is_ip4)
Definition: gtpu_decap.c:57
ip4_tcp_udp_validate_checksum
u32 ip4_tcp_udp_validate_checksum(vlib_main_t *vm, vlib_buffer_t *p0)
Definition: pnat_test_stubs.h:84
udp_header_t::checksum
u16 checksum
Definition: udp_packet.h:55
gtpu_check_ip
static_always_inline u8 gtpu_check_ip(vlib_buffer_t *b, u16 payload_len)
Definition: gtpu_decap.c:1284
hash_get_mem
#define hash_get_mem(h, key)
Definition: hash.h:269
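IPv6 keys (128-bit source plus TEID) do not fit a uword, so the hash stores them out of line and lookup passes the key by pointer; sketch:
  gtpu6_tunnel_key_t key6;
  key6.src = ip6_0->src_address;  /* full 128-bit outer source */
  key6.teid = gtpu0->teid;
  uword *p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6);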
vnet_main_t
Definition: vnet.h:76
vlib_validate_buffer_enqueue_x1
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:224
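The enqueue is speculative: the buffer index is written into the current next frame before next0 is known, and this macro repairs the frames when the guess was wrong. Illustrative fragment:
  to_next[0] = bi0;      /* speculate: same next node as last buffer */
  to_next += 1;
  n_left_to_next -= 1;
  /* ... decapsulate, possibly changing next0 ... */
  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                   to_next, n_left_to_next, bi0, next0);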
always_inline
#define always_inline
Definition: rdma_mlx5dv.h:23
clib_bihash_value
template key/value backing page structure
Definition: bihash_doc.h:44
vnet_update_l2_len
static u16 vnet_update_l2_len(vlib_buffer_t *b)
Definition: l2_input.h:300
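For L2-over-GTPU tunnels the inner packet is Ethernet and l2-input needs its header length recorded; hedged fragment, assuming the GTPU_INPUT_NEXT_L2_INPUT enumerator from gtpu.h:
  /* Required for l2 tag push/pop on the inner Ethernet frame. */
  if (next0 == GTPU_INPUT_NEXT_L2_INPUT)
    vnet_update_l2_len (b0);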
gtpu_rx_trace_t
Definition: gtpu_decap.c:24
ip4_header_t::src_address
ip4_address_t src_address
Definition: ip4_packet.h:125
vtep6_check
static u8 vtep6_check(vtep_table_t *t, vlib_buffer_t *b0, ip6_header_t *ip60, vtep6_key_t *last_k6)
Definition: vtep.h:155
ASSERT
#define ASSERT(truth)
Definition: error_bootstrap.h:69
gtpu_main_t::gtpu4_tunnel_by_key
uword * gtpu4_tunnel_by_key
Definition: gtpu.h:211
vlib_put_next_frame
vlib_put_next_frame(vm, node, next_index, 0)
Release pointer to next frame vector data.
gtpu4_input_node
vlib_node_registration_t gtpu4_input_node
(constructor) VLIB_REGISTER_NODE (gtpu4_input_node)
Definition: gtpu_decap.c:747
u32
unsigned int u32
Definition: types.h:88
VLIB_INIT_FUNCTION
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:172
udp_header_t::dst_port
u16 dst_port
Definition: udp_packet.h:48
ip4_header_t::ttl
u8 ttl
Definition: ip4_packet.h:112
gtpu_tunnel_t::encap_fib_index
u32 encap_fib_index
Definition: gtpu.h:154
gtpu_local_need_csum_check
#define gtpu_local_need_csum_check(_b)
Definition: gtpu_decap.c:1256
gtpu_flow_input
static uword gtpu_flow_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
Definition: gtpu_decap.c:1319
gtpu_validate_udp_csum
static_always_inline u8 gtpu_validate_udp_csum(vlib_main_t *vm, vlib_buffer_t *b)
Definition: gtpu_decap.c:1267
gtpu_tunnel_t
Definition: gtpu.h:128
gtpu_flow_error_t
gtpu_flow_error_t
Definition: gtpu_decap.c:1234
vlib_node_get_runtime
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:116
ip6_header_t
Definition: ip6_packet.h:294
format_ip4_header
format_function_t format_ip4_header
Definition: format.h:81
ip6_header_t::src_address
ip6_address_t src_address
Definition: ip6_packet.h:310
clib_memset
clib_memset (h->entries, 0, sizeof (h->entries[0]) * entries)
vlib_main_t
Definition: main.h:102
format_gtpu_rx_trace
static u8 * format_gtpu_rx_trace(u8 *s, va_list *args)
Definition: gtpu_decap.c:31
ip4_header_t::ip_version_and_header_length
u8 ip_version_and_header_length
Definition: ip4_packet.h:93
vlib_node_t
Definition: node.h:247
gtpu_rx_trace_t::next_index
u32 next_index
Definition: gtpu_decap.c:25
vlib_add_trace
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:628
gtpu_local_csum_is_valid
#define gtpu_local_csum_is_valid(_b)
Definition: gtpu_decap.c:1261
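Together with gtpu_local_need_csum_check above, this lets the flow path accept checksums already verified by NIC offload and fall back to software only when necessary; good_csum is an illustrative local:
  u8 good_csum;
  if (PREDICT_FALSE (gtpu_local_need_csum_check (b0)))
    good_csum = gtpu_validate_udp_csum (vm, b0);  /* software validate */
  else
    good_csum = gtpu_local_csum_is_valid (b0);    /* trust offload bits */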
GTPU_VER_MASK
#define GTPU_VER_MASK
Definition: gtpu.h:69
u8
unsigned char u8
Definition: types.h:56
clib_error_t
Definition: clib_error.h:21
vlib_buffer_get_current
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:257
vlib_init_function_t
typedef clib_error_t *(vlib_init_function_t) (struct vlib_main_t *vm)
Definition: init.h:51
vlib_buffer_t::data
u8 data[]
Packet data.
Definition: buffer.h:204
ip4_input_node
vlib_node_registration_t ip4_input_node
Global ip4 input node.
Definition: ip4_input.c:386
vtep4_check
static u8 vtep4_check(vtep_table_t *t, vlib_buffer_t *b0, ip4_header_t *ip40, vtep4_key_t *last_k4)
Definition: vtep.h:100
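The bypass node diverts only packets addressed to a local VTEP, caching the last match in last_vtep4; a sketch assuming gtpu_main_t carries a vtep_table and a vtep4_key_init counterpart of the vtep6_key_init indexed below:
  if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
    goto exit0;  /* not for a local VTEP: stay on the normal path */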
gtpu_error.def
gtpu_main
gtpu_main_t gtpu_main
Definition: gtpu.c:36
vlib_validate_buffer_enqueue_x2
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
validate_gtpu_fib
static u32 validate_gtpu_fib(vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
Definition: gtpu_decap.c:51
gtpu_main_t::tunnels
gtpu_tunnel_t * tunnels
Definition: gtpu.h:208
gtpu_error_strings
static char * gtpu_error_strings[]
Definition: gtpu_decap.c:740
GTPU_FLOW_N_ERROR
@ GTPU_FLOW_N_ERROR
Definition: gtpu_decap.c:1242
ip_vxan_bypass_next_t
ip_vxan_bypass_next_t
Definition: gtpu_decap.c:787
gtpu_err_code
static_always_inline u8 gtpu_err_code(u8 ip_err0, u8 udp_err0, u8 csum_err0)
Definition: gtpu_decap.c:1305
ip6_header_t::payload_length
u16 payload_length
Definition: ip6_packet.h:301
vlib_node_runtime_t
Definition: node.h:454
IP_GTPU_BYPASS_NEXT_DROP
@ IP_GTPU_BYPASS_NEXT_DROP
Definition: gtpu_decap.c:788
PREDICT_TRUE
#define PREDICT_TRUE(x)
Definition: clib.h:125
vlib_get_next_frame
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:395
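The canonical dispatch skeleton pairing this macro with vlib_put_next_frame; per-packet work is elided:
  while (n_left_from > 0)
    {
      u32 n_left_to_next;
      /* Claim space in the frame currently feeding next_index. */
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          /* ... decap one buffer, pick next0, enqueue ... */
        }
      /* Return unused slots so the frame can be dispatched. */
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }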
VLIB_TX
@ VLIB_TX
Definition: defs.h:47
GTPU_N_ERROR
@ GTPU_N_ERROR
Definition: gtpu.h:202
ip_gtpu_bypass_inline
static uword ip_gtpu_bypass_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u32 is_ip4)
Definition: gtpu_decap.c:794
format_ip6_forward_next_trace
u8 * format_ip6_forward_next_trace(u8 *s, va_list *args)
Definition: ip6_forward.c:949
ip6_gtpu_bypass_init
clib_error_t * ip6_gtpu_bypass_init(vlib_main_t *vm)
Definition: gtpu_decap.c:1221
ip4_header_t::protocol
u8 protocol
Definition: ip4_packet.h:115
vnet_main_t::interface_main
vnet_interface_main_t interface_main
Definition: vnet.h:81
vlib_increment_combined_counter
vlib_increment_combined_counter(ccm, ti, sw_if_index, n_buffers, n_bytes)
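RX interface stats are batched per sw_if_index and flushed when the interface changes or the frame ends; a sketch using the combined_sw_if_counters indexed above:
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_increment_combined_counter
    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
     vlib_get_thread_index (), stats_sw_if_index,
     stats_n_packets, stats_n_bytes);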
GTPU_E_S_PN_BIT
#define GTPU_E_S_PN_BIT
Definition: gtpu.h:74
vtep6_key_init
static void vtep6_key_init(vtep6_key_t *k6)
Definition: vtep.h:86
ip4_next_header
static void * ip4_next_header(ip4_header_t *i)
Definition: ip4_packet.h:196
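In the bypass node this locates the UDP header of a non-fragmented packet so the GTP-U destination port can be tested; UDP_DST_PORT_GTPU (2152) is assumed from vnet's UDP port registry:
  ip4_header_t *ip40 = vlib_buffer_get_current (b0);
  /* Valid only after ip4_is_fragment() excluded fragments, since
     ip4_next_header() merely skips the IP header (incl. options). */
  udp_header_t *udp0 = ip4_next_header (ip40);
  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
    goto exit0;  /* not GTP-U: continue normal ip4 processing */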
vlib_buffer_t::flags
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
Definition: buffer.h:133
vlib_buffer_t
VLIB buffer representation.
Definition: buffer.h:111
VLIB_REGISTER_NODE
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
gtpu_main_t::gtpu6_tunnel_by_key
uword * gtpu6_tunnel_by_key
Definition: gtpu.h:212