FD.io VPP  v19.04.4-rc0-5-ge88582fac
Vector Packet Processing
ip4_map.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Defines used for testing various optimisation schemes
 */

#include "map.h"
#include <vnet/ip/ip_frag.h>
#include <vnet/ip/ip4_to_ip6.h>

vlib_node_registration_t ip4_map_reass_node;

enum ip4_map_next_e
{
  IP4_MAP_NEXT_IP6_LOOKUP,
#ifdef MAP_SKIP_IP6_LOOKUP
  IP4_MAP_NEXT_IP6_REWRITE,
#endif
  IP4_MAP_NEXT_IP4_FRAGMENT,
  IP4_MAP_NEXT_IP6_FRAGMENT,
  IP4_MAP_NEXT_REASS,
  IP4_MAP_NEXT_ICMP_ERROR,
  IP4_MAP_NEXT_DROP,
  IP4_MAP_N_NEXT,
};

enum ip4_map_reass_next_t
{
  IP4_MAP_REASS_NEXT_IP6_LOOKUP,
  IP4_MAP_REASS_NEXT_IP4_FRAGMENT,
  IP4_MAP_REASS_NEXT_DROP,
  IP4_MAP_REASS_N_NEXT,
};

typedef struct
{
  u32 map_domain_index;
  u16 port;
  u8 cached;
} map_ip4_map_reass_trace_t;

u8 *
format_ip4_map_reass_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  map_ip4_map_reass_trace_t *t = va_arg (*args, map_ip4_map_reass_trace_t *);
  return format (s, "MAP domain index: %d L4 port: %u Status: %s",
                 t->map_domain_index, t->port,
                 t->cached ? "cached" : "forwarded");
}

static_always_inline u16
ip4_map_port_and_security_check (map_domain_t * d, ip4_header_t * ip,
                                 u32 * next, u8 * error)
{
  u16 port = 0;

  if (d->psid_length > 0)
    {
      if (ip4_get_fragment_offset (ip) == 0)
        {
          if (PREDICT_FALSE
              ((ip->ip_version_and_header_length != 0x45)
               || clib_host_to_net_u16 (ip->length) < 28))
            {
              return 0;
            }
          port = ip4_get_port (ip, 0);
          if (port)
            {
              /* Verify that port is not among the well-known ports */
              if ((d->psid_offset > 0)
                  && (clib_net_to_host_u16 (port) <
                      (0x1 << (16 - d->psid_offset))))
                {
                  *error = MAP_ERROR_ENCAP_SEC_CHECK;
                }
              else
                {
                  if (ip4_get_fragment_more (ip))
                    *next = IP4_MAP_NEXT_REASS;
                  return (port);
                }
            }
          else
            {
              *error = MAP_ERROR_BAD_PROTOCOL;
            }
        }
      else
        {
          *next = IP4_MAP_NEXT_REASS;
        }
    }
  return (0);
}

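/*
 * Worked example (illustrative values only, not from a real MAP domain):
 * with psid_offset = 6, the guard above computes 0x1 << (16 - 6) = 1024,
 * so a destination port below 1024 -- i.e. inside the well-known range
 * that the PSID offset excludes -- is flagged MAP_ERROR_ENCAP_SEC_CHECK
 * rather than being encapsulated.
 */
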
/*
 * ip4_map_vtcfl
 */
static_always_inline u32
ip4_map_vtcfl (ip4_header_t * ip4, vlib_buffer_t * p)
{
  map_main_t *mm = &map_main;
  u8 tc = mm->tc_copy ? ip4->tos : mm->tc;
  u32 vtcfl = 0x6 << 28;
  vtcfl |= tc << 20;
  vtcfl |= vnet_buffer (p)->ip.flow_hash & 0x000fffff;

  return (clib_host_to_net_u32 (vtcfl));
}

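/*
 * Example (illustrative values): with tos = 0xb8 and a flow hash of
 * 0x12345, the word above becomes
 *   (0x6 << 28) | (0xb8 << 20) | (0x12345 & 0x000fffff) = 0x6b812345,
 * i.e. IP version 6, traffic class 0xb8, and a flow label taken from
 * the buffer's flow hash.
 */
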
static_always_inline bool
ip4_map_ip6_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
{
#ifdef MAP_SKIP_IP6_LOOKUP
  if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP6].fei)
    {
      vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
        pre_resolved[FIB_PROTOCOL_IP6].dpo.dpoi_index;
      return (true);
    }
#endif
  return (false);
}

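/*
 * When MAP_SKIP_IP6_LOOKUP is built in and a pre-resolved IPv6 next-hop
 * has been configured, the bypass above stores that next-hop's DPO index
 * directly in the buffer, so the packet can be sent to ip6-load-balance
 * (IP4_MAP_NEXT_IP6_REWRITE) instead of taking a full ip6-lookup.
 */
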
/*
 * ip4_map_ttl
 */
static inline void
ip4_map_decrement_ttl (ip4_header_t * ip, u8 * error)
{
  i32 ttl = ip->ttl;

  /* Input node should have rejected packets with ttl 0. */
  ASSERT (ip->ttl > 0);

  u32 checksum = ip->checksum + clib_host_to_net_u16 (0x0100);
  checksum += checksum >= 0xffff;
  ip->checksum = checksum;
  ttl -= 1;
  ip->ttl = ttl;
  *error = ttl <= 0 ? IP4_ERROR_TIME_EXPIRED : *error;

  /* Verify checksum. */
  ASSERT (ip->checksum == ip4_header_checksum (ip));
}

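/*
 * The checksum adjustment above is the standard incremental update
 * (RFC 1624): lowering TTL by one raises the 16-bit header checksum by
 * 0x0100 in network byte order, and "checksum += checksum >= 0xffff"
 * folds the end-around carry back in.  Example (illustrative): a stored
 * checksum of 0xffff becomes 0xffff + 0x0100 = 0x100ff, the fold adds 1,
 * and the value written back to the 16-bit field is 0x0100.
 */
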
static u32
ip4_map_fragment (vlib_buffer_t * b, u16 mtu, bool df, u8 * error)
{
  map_main_t *mm = &map_main;

  if (mm->frag_inner)
    {
      // TODO: Fix inner fragmentation after removed inner support from ip-frag.
      ip_frag_set_vnet_buffer (b, /*sizeof (ip6_header_t), */ mtu,
                               IP4_FRAG_NEXT_IP6_LOOKUP,
                               IP_FRAG_FLAG_IP6_HEADER);
      return (IP4_MAP_NEXT_IP4_FRAGMENT);
    }
  else
    {
      if (df && !mm->frag_ignore_df)
        {
          icmp4_error_set_vnet_buffer (b, ICMP4_destination_unreachable,
                                       ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
                                       mtu);
          vlib_buffer_advance (b, sizeof (ip6_header_t));
          *error = MAP_ERROR_DF_SET;
          return (IP4_MAP_NEXT_ICMP_ERROR);
        }
      ip_frag_set_vnet_buffer (b, mtu, IP6_FRAG_NEXT_IP6_LOOKUP,
                               IP_FRAG_FLAG_IP6_HEADER);
      return (IP4_MAP_NEXT_IP6_FRAGMENT);
    }
}

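/*
 * Two fragmentation strategies are selected here: with mm->frag_inner the
 * inner IPv4 packet is fragmented (ip4-frag) and each fragment is then
 * encapsulated, otherwise the already-encapsulated IPv6 packet is
 * fragmented (ip6-frag).  In the outer case a set DF bit (unless
 * frag_ignore_df is configured) instead triggers an ICMPv4 "fragmentation
 * needed" error back to the sender, following the usual PMTUD behaviour.
 */
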
/*
 * ip4_map
 */
static uword
ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  map_main_t *mm = &map_main;
  vlib_combined_counter_main_t *cm = mm->domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual loop */
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 pi0, pi1;
          vlib_buffer_t *p0, *p1;
          map_domain_t *d0, *d1;
          u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
          ip4_header_t *ip40, *ip41;
          u16 port0 = 0, port1 = 0;
          ip6_header_t *ip6h0, *ip6h1;
          u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
          u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 =
            IP4_MAP_NEXT_IP6_LOOKUP;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);
            /* IPv4 + 8 = 28. possibly plus -40 */
            CLIB_PREFETCH (p2->data - 40, 68, STORE);
            CLIB_PREFETCH (p3->data - 40, 68, STORE);
          }

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);
          ip40 = vlib_buffer_get_current (p0);
          ip41 = vlib_buffer_get_current (p1);
          d0 =
            ip4_map_get_domain (&ip40->dst_address, &map_domain_index0,
                                &error0);
          d1 =
            ip4_map_get_domain (&ip41->dst_address, &map_domain_index1,
                                &error1);

          /*
           * Shared IPv4 address
           */
          port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
          port1 = ip4_map_port_and_security_check (d1, ip41, &next1, &error1);

          /* Decrement IPv4 TTL */
          ip4_map_decrement_ttl (ip40, &error0);
          ip4_map_decrement_ttl (ip41, &error1);
          bool df0 =
            ip40->flags_and_fragment_offset &
            clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
          bool df1 =
            ip41->flags_and_fragment_offset &
            clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);

          /* MAP calc */
          u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
          u32 da41 = clib_net_to_host_u32 (ip41->dst_address.as_u32);
          u16 dp40 = clib_net_to_host_u16 (port0);
          u16 dp41 = clib_net_to_host_u16 (port1);
          u64 dal60 = map_get_pfx (d0, da40, dp40);
          u64 dal61 = map_get_pfx (d1, da41, dp41);
          u64 dar60 = map_get_sfx (d0, da40, dp40);
          u64 dar61 = map_get_sfx (d1, da41, dp41);
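          /*
           * map_get_pfx()/map_get_sfx() (map.h) assemble the upper and
           * lower 64 bits of the MAP-E IPv6 destination from the domain's
           * Rule IPv6 prefix and the embedded-address bits derived from
           * the IPv4 destination and the PSID taken from the destination
           * port (RFC 7597-style mapping); both halves being zero below
           * means no binding exists for this flow.
           */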
          if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
              && next0 != IP4_MAP_NEXT_REASS)
            error0 = MAP_ERROR_NO_BINDING;
          if (dal61 == 0 && dar61 == 0 && error1 == MAP_ERROR_NONE
              && next1 != IP4_MAP_NEXT_REASS)
            error1 = MAP_ERROR_NO_BINDING;

          /* construct ipv6 header */
          vlib_buffer_advance (p0, -sizeof (ip6_header_t));
          vlib_buffer_advance (p1, -sizeof (ip6_header_t));
          ip6h0 = vlib_buffer_get_current (p0);
          ip6h1 = vlib_buffer_get_current (p1);
          vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
          vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          ip6h0->ip_version_traffic_class_and_flow_label =
            ip4_map_vtcfl (ip40, p0);
          ip6h1->ip_version_traffic_class_and_flow_label =
            ip4_map_vtcfl (ip41, p1);
          ip6h0->payload_length = ip40->length;
          ip6h1->payload_length = ip41->length;
          ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
          ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
          ip6h0->hop_limit = 0x40;
          ip6h1->hop_limit = 0x40;
          ip6h0->src_address = d0->ip6_src;
          ip6h1->src_address = d1->ip6_src;
          ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
          ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
          ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64 (dal61);
          ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64 (dar61);

          /*
           * Determine next node. Can be one of:
           * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
           */
          if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
            {
              if (PREDICT_FALSE
                  (d0->mtu
                   && (clib_net_to_host_u16 (ip6h0->payload_length) +
                       sizeof (*ip6h0) > d0->mtu)))
                {
                  next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
                }
              else
                {
                  next0 =
                    ip4_map_ip6_lookup_bypass (p0,
                                               ip40) ?
                    IP4_MAP_NEXT_IP6_REWRITE : next0;
                  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                                   thread_index,
                                                   map_domain_index0, 1,
                                                   clib_net_to_host_u16
                                                   (ip6h0->payload_length) +
                                                   40);
                }
            }
          else
            {
              next0 = IP4_MAP_NEXT_DROP;
            }

          /*
           * Determine next node. Can be one of:
           * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
           */
          if (PREDICT_TRUE (error1 == MAP_ERROR_NONE))
            {
              if (PREDICT_FALSE
                  (d1->mtu
                   && (clib_net_to_host_u16 (ip6h1->payload_length) +
                       sizeof (*ip6h1) > d1->mtu)))
                {
                  next1 = ip4_map_fragment (p1, d1->mtu, df1, &error1);
                }
              else
                {
                  next1 =
                    ip4_map_ip6_lookup_bypass (p1,
                                               ip41) ?
                    IP4_MAP_NEXT_IP6_REWRITE : next1;
                  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                                   thread_index,
                                                   map_domain_index1, 1,
                                                   clib_net_to_host_u16
                                                   (ip6h1->payload_length) +
                                                   40);
                }
            }
          else
            {
              next1 = IP4_MAP_NEXT_DROP;
            }

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->map_domain_index = map_domain_index0;
              tr->port = port0;
            }
          if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
            {
              map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
              tr->map_domain_index = map_domain_index1;
              tr->port = port1;
            }

          p0->error = error_node->errors[error0];
          p1->error = error_node->errors[error1];

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, pi0, pi1, next0,
                                           next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          map_domain_t *d0;
          u8 error0 = MAP_ERROR_NONE;
          ip4_header_t *ip40;
          u16 port0 = 0;
          ip6_header_t *ip6h0;
          u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP;
          u32 map_domain_index0 = ~0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);
          ip40 = vlib_buffer_get_current (p0);

          d0 =
            ip4_map_get_domain (&ip40->dst_address, &map_domain_index0,
                                &error0);
          if (!d0)
            {                   /* Guess it wasn't for us */
              vnet_feature_next (&next0, p0);
              goto exit;
            }

          /*
           * Shared IPv4 address
           */
          port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);

          /* Decrement IPv4 TTL */
          ip4_map_decrement_ttl (ip40, &error0);
          bool df0 =
            ip40->flags_and_fragment_offset &
            clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);

          /* MAP calc */
          u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
          u16 dp40 = clib_net_to_host_u16 (port0);
          u64 dal60 = map_get_pfx (d0, da40, dp40);
          u64 dar60 = map_get_sfx (d0, da40, dp40);
          if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
              && next0 != IP4_MAP_NEXT_REASS)
            error0 = MAP_ERROR_NO_BINDING;

          /* construct ipv6 header */
          vlib_buffer_advance (p0, -(sizeof (ip6_header_t)));
          ip6h0 = vlib_buffer_get_current (p0);
          vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          ip6h0->ip_version_traffic_class_and_flow_label =
            ip4_map_vtcfl (ip40, p0);
          ip6h0->payload_length = ip40->length;
          ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
          ip6h0->hop_limit = 0x40;
          ip6h0->src_address = d0->ip6_src;
          ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
          ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);

          /*
           * Determine next node. Can be one of:
           * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
           */
          if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
            {
              if (PREDICT_FALSE
                  (d0->mtu
                   && (clib_net_to_host_u16 (ip6h0->payload_length) +
                       sizeof (*ip6h0) > d0->mtu)))
                {
                  next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
                }
              else
                {
                  next0 =
                    ip4_map_ip6_lookup_bypass (p0,
                                               ip40) ?
                    IP4_MAP_NEXT_IP6_REWRITE : next0;
                  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                                   thread_index,
                                                   map_domain_index0, 1,
                                                   clib_net_to_host_u16
                                                   (ip6h0->payload_length) +
                                                   40);
                }
            }
          else
            {
              next0 = IP4_MAP_NEXT_DROP;
            }

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->map_domain_index = map_domain_index0;
              tr->port = port0;
            }

          p0->error = error_node->errors[error0];
        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, pi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
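
/*
 * ip4-map is registered as a feature on the ip4-unicast arc (see the
 * VNET_FEATURE_INIT further down); when ip4_map_get_domain() finds no
 * matching MAP domain, the "Guess it wasn't for us" branch hands the
 * packet to the next feature on the arc via vnet_feature_next() instead
 * of dropping it.
 */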

/*
 * ip4_map_reass
 */
static uword
ip4_map_reass (vlib_main_t * vm,
               vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_reass_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  map_main_t *mm = &map_main;
  vlib_combined_counter_main_t *cm = mm->domain_counters;
  u32 thread_index = vm->thread_index;
  u32 *fragments_to_drop = NULL;
  u32 *fragments_to_loopback = NULL;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          map_domain_t *d0;
          u8 error0 = MAP_ERROR_NONE;
          ip4_header_t *ip40;
          i32 port0 = 0;
          ip6_header_t *ip60;
          u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP;
          u32 map_domain_index0 = ~0;
          u8 cached = 0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);
          ip60 = vlib_buffer_get_current (p0);
          ip40 = (ip4_header_t *) (ip60 + 1);
          d0 =
            ip4_map_get_domain (&ip40->dst_address, &map_domain_index0,
                                &error0);

          map_ip4_reass_lock ();
          map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
                                                  ip40->dst_address.as_u32,
                                                  ip40->fragment_id,
                                                  ip40->protocol,
                                                  &fragments_to_drop);
          if (PREDICT_FALSE (!r))
            {
              // Could not create a caching entry
              error0 = MAP_ERROR_FRAGMENT_MEMORY;
            }
          else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
            {
              if (r->port >= 0)
                {
                  // We know the port already
                  port0 = r->port;
                }
              else if (map_ip4_reass_add_fragment (r, pi0))
                {
                  // Not enough space for caching
                  error0 = MAP_ERROR_FRAGMENT_MEMORY;
                  map_ip4_reass_free (r, &fragments_to_drop);
                }
              else
                {
                  cached = 1;
                }
            }
          else if ((port0 = ip4_get_port (ip40, 0)) == 0)
            {
              // Could not find port. We'll free the reassembly.
              error0 = MAP_ERROR_BAD_PROTOCOL;
              port0 = 0;
              map_ip4_reass_free (r, &fragments_to_drop);
            }
          else
            {
              r->port = port0;
              map_ip4_reass_get_fragments (r, &fragments_to_loopback);
            }

#ifdef MAP_IP4_REASS_COUNT_BYTES
          if (!cached && r)
            {
              r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
              if (!ip4_get_fragment_more (ip40))
                r->expected_total =
                  ip4_get_fragment_offset (ip40) * 8 +
                  clib_host_to_net_u16 (ip40->length) - 20;
              if (r->forwarded >= r->expected_total)
                map_ip4_reass_free (r, &fragments_to_drop);
            }
#endif
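          /*
           * Byte-count bookkeeping (when MAP_IP4_REASS_COUNT_BYTES is
           * compiled in): every forwarded fragment adds its L4 bytes
           * (IPv4 length minus the 20-byte header) to r->forwarded, the
           * last fragment (MF clear) fixes r->expected_total from its
           * offset and length, and the reassembly state is freed as soon
           * as all expected bytes have been forwarded.
           */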

          map_ip4_reass_unlock ();

          // NOTE: Most operations have already been performed by ip4_map
          // All we need is the right destination address
          ip60->dst_address.as_u64[0] =
            map_get_pfx_net (d0, ip40->dst_address.as_u32, port0);
          ip60->dst_address.as_u64[1] =
            map_get_sfx_net (d0, ip40->dst_address.as_u32, port0);

          if (PREDICT_FALSE
              (d0->mtu
               && (clib_net_to_host_u16 (ip60->payload_length) +
                   sizeof (*ip60) > d0->mtu)))
            {
              // TODO: vnet_buffer (p0)->ip_frag.header_offset = sizeof (*ip60);
              vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
              vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
              vnet_buffer (p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
              next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT;
            }

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              map_ip4_map_reass_trace_t *tr =
                vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->map_domain_index = map_domain_index0;
              tr->port = port0;
              tr->cached = cached;
            }

          if (cached)
            {
              // Dequeue the packet
              n_left_to_next++;
              to_next--;
            }
          else
            {
              if (error0 == MAP_ERROR_NONE)
                vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                                 thread_index,
                                                 map_domain_index0, 1,
                                                 clib_net_to_host_u16
                                                 (ip60->payload_length) + 40);
              next0 =
                (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
              p0->error = error_node->errors[error0];
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                               n_left_to_next, pi0, next0);
            }

          // Loopback when we reach the end of the input vector
          if (n_left_from == 0 && vec_len (fragments_to_loopback))
            {
              from = vlib_frame_vector_args (frame);
              u32 len = vec_len (fragments_to_loopback);
              if (len <= VLIB_FRAME_SIZE)
                {
                  clib_memcpy_fast (from, fragments_to_loopback,
                                    sizeof (u32) * len);
                  n_left_from = len;
                  vec_reset_length (fragments_to_loopback);
                }
              else
                {
                  clib_memcpy_fast (from, fragments_to_loopback +
                                    (len - VLIB_FRAME_SIZE),
                                    sizeof (u32) * VLIB_FRAME_SIZE);
                  n_left_from = VLIB_FRAME_SIZE;
                  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
                }
            }
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  map_send_all_to_node (vm, fragments_to_drop, node,
                        &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
                        IP4_MAP_REASS_NEXT_DROP);

  vec_free (fragments_to_drop);
  vec_free (fragments_to_loopback);
  return frame->n_vectors;
}

static char *map_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};


/* *INDENT-OFF* */
VNET_FEATURE_INIT (ip4_map_feature, static) =
{
  .arc_name = "ip4-unicast",
  .node_name = "ip4-map",
  .runs_before =
    VNET_FEATURES ("ip4-flow-classify"),
};

VLIB_REGISTER_NODE (ip4_map_node) = {
  .function = ip4_map,
  .name = "ip4-map",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_error_strings,

  .n_next_nodes = IP4_MAP_N_NEXT,
  .next_nodes = {
    [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
#ifdef MAP_SKIP_IP6_LOOKUP
    [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-load-balance",
#endif
    [IP4_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
    [IP4_MAP_NEXT_IP6_FRAGMENT] = "ip6-frag",
    [IP4_MAP_NEXT_REASS] = "ip4-map-reass",
    [IP4_MAP_NEXT_ICMP_ERROR] = "ip4-icmp-error",
    [IP4_MAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
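
/*
 * The "ip4-map" node is inserted into the ip4-unicast feature arc by the
 * VNET_FEATURE_INIT above; traffic reaches it once the feature is enabled
 * on an interface and at least one MAP domain exists.  Domains are
 * created through the MAP plugin's CLI/API (roughly "map add domain
 * ip4-pfx ... ip6-pfx ... ip6-src ..."; see map.c for the exact syntax
 * and parameters).
 */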

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_map_reass_node) = {
  .function = ip4_map_reass,
  .name = "ip4-map-reass",
  .vector_size = sizeof(u32),
  .format_trace = format_ip4_map_reass_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_error_strings,

  .n_next_nodes = IP4_MAP_REASS_N_NEXT,
  .next_nodes = {
    [IP4_MAP_REASS_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP4_MAP_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
    [IP4_MAP_REASS_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
Definition: ip6_packet.h:385