FD.io VPP  v18.07-rc0-415-g6c78436
Vector Packet Processing
gtpu_decap.c
/*
 * decap.c: gtpu tunnel decap packet processing
 *
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <gtpu/gtpu.h>

vlib_node_registration_t gtpu4_input_node;
vlib_node_registration_t gtpu6_input_node;
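
/*
 * Overview (editor's note): this is the decap half of the VPP gtpu plugin.
 * Packets arrive at gtpu4-input / gtpu6-input with current_data pointing
 * at the GTP-U header (udp-local or the ip-gtpu-bypass nodes below
 * dispatch on UDP dst port 2152).  The tunnel is looked up by the outer
 * source address plus TEID; on a hit the outer ip/udp/gtpu headers are
 * popped and the inner payload goes to the tunnel's decap-next node.
 */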

typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 teid;
} gtpu_rx_trace_t;

static u8 * format_gtpu_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gtpu_rx_trace_t * t = va_arg (*args, gtpu_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "GTPU decap from gtpu_tunnel%d teid %d next %d error %d",
                  t->tunnel_index, t->teid, t->next_index, t->error);
    }
  else
    {
      s = format (s, "GTPU decap error - tunnel for teid %d does not exist",
                  t->teid);
    }
  return s;
}

always_inline u32
validate_gtpu_fib (vlib_buffer_t *b, gtpu_tunnel_t *t, u32 is_ip4)
{
  u32 fib_index, sw_if_index;

  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];

  if (is_ip4)
    fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
      vec_elt (ip4_main.fib_index_by_sw_if_index, sw_if_index) :
      vnet_buffer (b)->sw_if_index[VLIB_TX];
  else
    fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
      vec_elt (ip6_main.fib_index_by_sw_if_index, sw_if_index) :
      vnet_buffer (b)->sw_if_index[VLIB_TX];

  return (fib_index == t->encap_fib_index);
}
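
/*
 * Editor's note: gtpu_input follows the usual VPP input-node shape -- a
 * dual-packet loop with buffer prefetch while at least four packets
 * remain, then a single-packet cleanup loop.  The most recent
 * (src, teid) -> tunnel lookup is cached in last_key4/last_key6 and
 * last_tunnel_index, so back-to-back packets on one tunnel skip the hash.
 */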

always_inline uword
gtpu_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * from_frame,
            u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  gtpu4_tunnel_key_t last_key4;
  gtpu6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    memset (&last_key6, 0xff, sizeof (last_key6));

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          gtpu_header_t * gtpu0, * gtpu1;
          u32 gtpu_hdr_len0 = 0, gtpu_hdr_len1 = 0;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          gtpu_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
          gtpu4_tunnel_key_t key4_0, key4_1;
          gtpu6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          gtpu1 = vlib_buffer_get_current (b1);
          if (is_ip4) {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            vlib_buffer_advance
              (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            ip4_0 = vlib_buffer_get_current (b0);
            ip4_1 = vlib_buffer_get_current (b1);
          } else {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            vlib_buffer_advance
              (b1, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            ip6_0 = vlib_buffer_get_current (b0);
            ip6_1 = vlib_buffer_get_current (b1);
          }

          /* pop (ip, udp); the gtpu header is popped later, once its
             variable length is known */
          if (is_ip4) {
            vlib_buffer_advance
              (b0, sizeof(*ip4_0)+sizeof(udp_header_t));
            vlib_buffer_advance
              (b1, sizeof(*ip4_1)+sizeof(udp_header_t));
          } else {
            vlib_buffer_advance
              (b0, sizeof(*ip6_0)+sizeof(udp_header_t));
            vlib_buffer_advance
              (b1, sizeof(*ip6_1)+sizeof(udp_header_t));
          }

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          if (PREDICT_FALSE ((gtpu0->ver_flags & GTPU_VER_MASK) != GTPU_V1_VER))
            {
              error0 = GTPU_ERROR_BAD_VER;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace0;
            }

          /* Manipulate packet 0 */
          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists according to packet SIP and teid:
             * SIP identifies a GTPU path, and teid identifies a tunnel in a given GTPU path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure the mcast GTPU tunnel exists by packet DIP and teid */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists according to packet SIP and teid:
             * SIP identifies a GTPU path, and teid identifies a tunnel in a given GTPU path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace0;
                  }
                clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace0;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next0; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next0; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace0;
          }

        next0:
          /* Manipulate gtpu header */
          if (PREDICT_FALSE((gtpu0->ver_flags & GTPU_E_S_PN_BIT) != 0))
            {
              gtpu_hdr_len0 = sizeof(gtpu_header_t);

              /* Manipulate Sequence Number and N-PDU Number */
              /* TBD */

              /* Manipulate Next Extension Header */
              /* TBD */
            }
          else
            {
              gtpu_hdr_len0 = sizeof(gtpu_header_t) - 4;
            }
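
          /*
           * Editor's note: per 3GPP TS 29.281 the GTP-U header is 8
           * mandatory bytes plus a 4-byte optional block (sequence number,
           * N-PDU number, next extension header) that is present on the
           * wire whenever any of the E, S or PN flags is set -- hence
           * sizeof(gtpu_header_t) with the flags, minus 4 without.
           */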

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
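
          /*
           * Editor's note: the combined rx counter is flushed only when the
           * tunnel sw_if_index changes, so a run of packets on one tunnel
           * costs a single counter update instead of one per packet.
           */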

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = clib_net_to_host_u32(gtpu0->teid);
            }

          if (PREDICT_FALSE ((gtpu1->ver_flags & GTPU_VER_MASK) != GTPU_V1_VER))
            {
              error1 = GTPU_ERROR_BAD_VER;
              next1 = GTPU_INPUT_NEXT_DROP;
              goto trace1;
            }

          /* Manipulate packet 1 */
          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.teid = gtpu1->teid;

            /* Make sure the GTPU tunnel exists according to packet SIP and teid:
             * SIP identifies a GTPU path, and teid identifies a tunnel in a given GTPU path */
            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }
                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
              {
                key4_1.src = ip4_1->dst_address.as_u32;
                key4_1.teid = gtpu1->teid;
                /* Make sure the mcast GTPU tunnel exists by packet DIP and teid */
                p1 = hash_get (gtm->gtpu4_tunnel_by_key, key4_1.as_u64);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;

          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.teid = gtpu1->teid;

            /* Make sure the GTPU tunnel exists according to packet SIP and teid:
             * SIP identifies a GTPU path, and teid identifies a tunnel in a given GTPU path */
            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);

                if (PREDICT_FALSE (p1 == NULL))
                  {
                    error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next1 = GTPU_INPUT_NEXT_DROP;
                    goto trace1;
                  }

                clib_memcpy (&last_key6, &key6_1, sizeof(key6_1));
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
            t1 = pool_elt_at_index (gtm->tunnels, tunnel_index1);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b1, t1, is_ip4) == 0))
              {
                error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next1 = GTPU_INPUT_NEXT_DROP;
                goto trace1;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
                                                    &t1->src.ip6)))
              goto next1; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
              {
                key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
                key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
                key6_1.teid = gtpu1->teid;
                p1 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_1);
                if (PREDICT_TRUE (p1 != NULL))
                  {
                    mt1 = pool_elt_at_index (gtm->tunnels, p1[0]);
                    goto next1; /* valid packet */
                  }
              }
            error1 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next1 = GTPU_INPUT_NEXT_DROP;
            goto trace1;
          }

        next1:
          /* Manipulate gtpu header */
          if (PREDICT_FALSE((gtpu1->ver_flags & GTPU_E_S_PN_BIT) != 0))
            {
              gtpu_hdr_len1 = sizeof(gtpu_header_t);

              /* Manipulate Sequence Number and N-PDU Number */
              /* TBD */

              /* Manipulate Next Extension Header */
              /* TBD */
            }
          else
            {
              gtpu_hdr_len1 = sizeof(gtpu_header_t) - 4;
            }

          /* Pop gtpu header */
          vlib_buffer_advance (b1, gtpu_hdr_len1);

          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->teid = clib_net_to_host_u32(gtpu1->teid);
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          gtpu_header_t * gtpu0;
          u32 gtpu_hdr_len0 = 0;
          uword * p0;
          u32 tunnel_index0;
          gtpu_tunnel_t * t0, * mt0 = NULL;
          gtpu4_tunnel_key_t key4_0;
          gtpu6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the gtpu header */
          gtpu0 = vlib_buffer_get_current (b0);
          if (is_ip4) {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            ip4_0 = vlib_buffer_get_current (b0);
          } else {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            ip6_0 = vlib_buffer_get_current (b0);
          }

          /* pop (ip, udp) */
          if (is_ip4) {
            vlib_buffer_advance
              (b0, sizeof(*ip4_0)+sizeof(udp_header_t));
          } else {
            vlib_buffer_advance
              (b0, sizeof(*ip6_0)+sizeof(udp_header_t));
          }

          tunnel_index0 = ~0;
          error0 = 0;
          if (PREDICT_FALSE ((gtpu0->ver_flags & GTPU_VER_MASK) != GTPU_V1_VER))
            {
              error0 = GTPU_ERROR_BAD_VER;
              next0 = GTPU_INPUT_NEXT_DROP;
              goto trace00;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists according to packet SIP and teid:
             * SIP identifies a GTPU path, and teid identifies a tunnel in a given GTPU path */
            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
              {
                key4_0.src = ip4_0->dst_address.as_u32;
                key4_0.teid = gtpu0->teid;
                /* Make sure the mcast GTPU tunnel exists by packet DIP and teid */
                p0 = hash_get (gtm->gtpu4_tunnel_by_key, key4_0.as_u64);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;

          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.teid = gtpu0->teid;

            /* Make sure the GTPU tunnel exists according to packet SIP and teid:
             * SIP identifies a GTPU path, and teid identifies a tunnel in a given GTPU path */
            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_FALSE (p0 == NULL))
                  {
                    error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                    next0 = GTPU_INPUT_NEXT_DROP;
                    goto trace00;
                  }
                clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
            t0 = pool_elt_at_index (gtm->tunnels, tunnel_index0);

            /* Validate GTPU tunnel encap-fib index against packet */
            if (PREDICT_FALSE (validate_gtpu_fib (b0, t0, is_ip4) == 0))
              {
                error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
                next0 = GTPU_INPUT_NEXT_DROP;
                goto trace00;
              }

            /* Validate GTPU tunnel SIP against packet DIP */
            if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
                                                    &t0->src.ip6)))
              goto next00; /* valid packet */
            if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
              {
                key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
                key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
                key6_0.teid = gtpu0->teid;
                p0 = hash_get_mem (gtm->gtpu6_tunnel_by_key, &key6_0);
                if (PREDICT_TRUE (p0 != NULL))
                  {
                    mt0 = pool_elt_at_index (gtm->tunnels, p0[0]);
                    goto next00; /* valid packet */
                  }
              }
            error0 = GTPU_ERROR_NO_SUCH_TUNNEL;
            next0 = GTPU_INPUT_NEXT_DROP;
            goto trace00;
          }

        next00:
          /* Manipulate gtpu header */
          if (PREDICT_FALSE((gtpu0->ver_flags & GTPU_E_S_PN_BIT) != 0))
            {
              gtpu_hdr_len0 = sizeof(gtpu_header_t);

              /* Manipulate Sequence Number and N-PDU Number */
              /* TBD */

              /* Manipulate Next Extension Header */
              /* TBD */
            }
          else
            {
              gtpu_hdr_len0 = sizeof(gtpu_header_t) - 4;
            }

          /* Pop gtpu header */
          vlib_buffer_advance (b0, gtpu_hdr_len0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == GTPU_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast GTPU tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->teid = clib_net_to_host_u32(gtpu0->teid);
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, is_ip4 ?
                               gtpu4_input_node.index : gtpu6_input_node.index,
                               GTPU_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
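
/*
 * Editor's note: gtpu4-input / gtpu6-input are not on a feature arc; the
 * plugin registers them as the handlers for UDP dst port 2152 (via
 * udp_register_dst_port at plugin init), so udp-local -- or the bypass
 * nodes below -- hand over buffers with current_data already at the
 * GTP-U header, as the code above assumes.
 */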

static uword
gtpu4_input (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
gtpu6_input (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return gtpu_input(vm, node, from_frame, /* is_ip4 */ 0);
}

static char * gtpu_error_strings[] = {
#define gtpu_error(n,s) s,
#include <gtpu/gtpu_error.def>
#undef gtpu_error
#undef _
};

VLIB_REGISTER_NODE (gtpu4_input_node) = {
  .function = gtpu4_input,
  .name = "gtpu4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (gtpu4_input_node, gtpu4_input)

VLIB_REGISTER_NODE (gtpu6_input_node) = {
  .function = gtpu6_input,
  .name = "gtpu6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = GTPU_N_ERROR,
  .error_strings = gtpu_error_strings,

  .n_next_nodes = GTPU_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_INPUT_NEXT_##s] = n,
    foreach_gtpu_input_next
#undef _
  },

//temp  .format_buffer = format_gtpu_header,
  .format_trace = format_gtpu_rx_trace,
  // $$$$ .unformat_buffer = unformat_gtpu_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (gtpu6_input_node, gtpu6_input)
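
/*
 * Editor's note: the ip4-gtpu-bypass / ip6-gtpu-bypass nodes below sit on
 * the ip4-unicast / ip6-unicast feature arcs.  For each packet they check
 * for UDP dst port 2152 (GTP-U), verify that the destination address is a
 * local VTEP and that the UDP length/checksum are sound, and then steer
 * matching packets straight to gtpu4-input / gtpu6-input, bypassing the
 * rest of the IP local path; everything else continues to the next feature.
 */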

typedef enum {
  IP_GTPU_BYPASS_NEXT_DROP,
  IP_GTPU_BYPASS_NEXT_GTPU,
  IP_GTPU_BYPASS_N_NEXT,
} ip_gtpu_bypass_next_t;

always_inline uword
ip_gtpu_bypass_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * frame,
                       u32 is_ip4)
{
  gtpu_main_t * gtm = &gtpu_main;
  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4) addr4.data_u32 = ~0;
  else ip6_address_set_zero (&addr6);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t * b0, * b1;
          ip4_header_t * ip40, * ip41;
          ip6_header_t * ip60, * ip61;
          udp_header_t * udp0, * udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature */
          vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
          vnet_feature_next(vnet_buffer(b1)->sw_if_index[VLIB_RX], &next1, b1);

          if (is_ip4)
            {
              /* Treat IP frag packets as "experimental" protocol for now
                 until support of IP frag reassembly is implemented */
              proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
              proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit0; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (gtm->vtep4, ip40->dst_address.as_u32))
                    goto exit0; /* no local VTEP for GTPU packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (gtm->vtep6, &ip60->dst_address))
                    goto exit0; /* no local VTEP for GTPU packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }
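
          /*
           * Editor's note: checksum validation above is deliberately lazy --
           * flags set by hardware offload or an earlier node are trusted
           * first, and ip4_tcp_udp_validate_checksum /
           * ip6_tcp_udp_icmp_validate_checksum run only when nothing has
           * computed the checksum yet.
           */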

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current at GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1; /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit1; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip41->dst_address.as_u32)
                {
                  if (!hash_get (gtm->vtep4, ip41->dst_address.as_u32))
                    goto exit1; /* no local VTEP for GTPU packet */
                  addr4 = ip41->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
                {
                  if (!hash_get_mem (gtm->vtep6, &ip61->dst_address))
                    goto exit1; /* no local VTEP for GTPU packet */
                  addr6 = ip61->dst_address;
                }
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* gtpu-input node expects current at GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t * b0;
          ip4_header_t * ip40;
          ip6_header_t * ip60;
          udp_header_t * udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);

          if (is_ip4)
            /* Treat IP4 frag packets as "experimental" protocol for now
               until support of IP frag reassembly is implemented */
            proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit; /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_GTPU))
            goto exit; /* not GTPU packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (gtm->vtep4, ip40->dst_address.as_u32))
                    goto exit; /* no local VTEP for GTPU packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (gtm->vtep6, &ip60->dst_address))
                    goto exit; /* no local VTEP for GTPU packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_GTPU_BYPASS_NEXT_DROP : IP_GTPU_BYPASS_NEXT_GTPU;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* gtpu-input node expects current at GTPU header */
          if (is_ip4)
            vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
          else
            vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static uword
ip4_gtpu_bypass (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

VLIB_REGISTER_NODE (ip4_gtpu_bypass_node) = {
  .function = ip4_gtpu_bypass,
  .name = "ip4-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu4-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_gtpu_bypass_node, ip4_gtpu_bypass)

/* Dummy init function to get us linked in. */
clib_error_t * ip4_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip4_gtpu_bypass_init);

static uword
ip6_gtpu_bypass (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  return ip_gtpu_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (ip6_gtpu_bypass_node) = {
  .function = ip6_gtpu_bypass,
  .name = "ip6-gtpu-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_GTPU_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_GTPU_BYPASS_NEXT_DROP] = "error-drop",
    [IP_GTPU_BYPASS_NEXT_GTPU] = "gtpu6-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (ip6_gtpu_bypass_node, ip6_gtpu_bypass)

/* Dummy init function to get us linked in. */
clib_error_t * ip6_gtpu_bypass_init (vlib_main_t * vm)
{ return 0; }

VLIB_INIT_FUNCTION (ip6_gtpu_bypass_init);
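
/*
 * Editor's usage sketch (assumed CLI syntax -- verify against your VPP
 * build; the exact commands live in the gtpu plugin and may differ):
 *
 *   create gtpu tunnel src 10.0.0.1 dst 10.0.0.2 teid 100
 *   set interface ip gtpu-bypass GigabitEthernet0/8/0
 *   trace add dpdk-input 10
 *
 * With the bypass feature enabled, a received GTP-U packet should show
 * ip4-gtpu-bypass followed by gtpu4-input in the packet trace, then the
 * tunnel's decap-next node (l2 or ip).
 */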