FD.io VPP  v18.07.1-19-g511ce25
Vector Packet Processing
ip4_map.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /*
16  * Defines used for testing various optimisation schemes
17  */
18 #define MAP_ENCAP_DUAL 0
19 
20 #include "map.h"
21 #include <vnet/ip/ip_frag.h>
22 #include <vnet/ip/ip4_to_ip6.h>
23 
25 
27 {
29 #ifdef MAP_SKIP_IP6_LOOKUP
31 #endif
38 };
39 
41 {
46 };
47 
48 typedef struct
49 {
54 
/* Trace formatter for the ip4-map-reass node: prints the MAP domain index,
 * the L4 port and whether the fragment was cached or forwarded.
 * NOTE(review): original line 60 — the va_arg that extracts the trace
 * record pointer `t` from *args — was dropped by the extraction; restore
 * it from the upstream file before compiling. */
55 u8 *
56 format_ip4_map_reass_trace (u8 * s, va_list * args)
57 {
58  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
61  return format (s, "MAP domain index: %d L4 port: %u Status: %s",
62  t->map_domain_index, t->port,
63  t->cached ? "cached" : "forwarded");
64 }
65 
/* Extract the destination L4 port from an IPv4 packet and enforce the MAP
 * PSID security check.  Returns the port (network byte order) on success,
 * 0 otherwise, updating *error and/or steering *next to the reassembly
 * node for fragments.  Only meaningful when the domain shares an IPv4
 * address (psid_length > 0).
 * NOTE(review): the signature lines (orig. 66-67) are missing from this
 * extraction; per the cross-reference listing at the bottom of this dump
 * it is: static_always_inline u16 ip4_map_port_and_security_check
 *   (map_domain_t * d, ip4_header_t * ip, u32 * next, u8 * error). */
68  u32 * next, u8 * error)
69 {
70  u16 port = 0;
71 
72  if (d->psid_length > 0)
73  {
74  if (ip4_get_fragment_offset (ip) == 0)
75  {
/* First fragment (or unfragmented): must carry the L4 header.
 * Reject packets with IP options or too short to hold ports. */
76  if (PREDICT_FALSE
77  ((ip->ip_version_and_header_length != 0x45)
78  || clib_host_to_net_u16 (ip->length) < 28))
79  {
80  return 0;
81  }
82  port = ip4_get_port (ip, 0);
83  if (port)
84  {
85  /* Verify that port is not among the well-known ports */
86  if ((d->psid_offset > 0)
87  && (clib_net_to_host_u16 (port) <
88  (0x1 << (16 - d->psid_offset))))
89  {
90  *error = MAP_ERROR_ENCAP_SEC_CHECK;
91  }
92  else
93  {
/* More-fragments set: later fragments lack the port, so send
 * the whole chain through the MAP reassembly node. */
94  if (ip4_get_fragment_more (ip))
95  *next = IP4_MAP_NEXT_REASS;
96  return (port);
97  }
98  }
99  else
100  {
101  *error = MAP_ERROR_BAD_PROTOCOL;
102  }
103  }
104  else
105  {
/* Non-first fragment: port is unknown here; reassembly node
 * will recover it from the first fragment. */
106  *next = IP4_MAP_NEXT_REASS;
107  }
108  }
109  return (0);
110 }
111 
112 /*
113  * ip4_map_vtcfl
114  */
/* Build the IPv6 version / traffic-class / flow-label word for the outer
 * header.  Traffic class is copied from the inner IPv4 TOS when tc_copy
 * is configured, else the static configured value; the 20-bit flow label
 * comes from the buffer's precomputed flow hash.  Returns the word in
 * network byte order.
 * NOTE(review): signature lines (orig. 115-116) missing from this
 * extraction; per the cross-reference it is:
 * static_always_inline u32 ip4_map_vtcfl (ip4_header_t * ip4,
 *                                         vlib_buffer_t * p). */
117 {
118  map_main_t *mm = &map_main;
119  u8 tc = mm->tc_copy ? ip4->tos : mm->tc;
120  u32 vtcfl = 0x6 << 28;
121  vtcfl |= tc << 20;
122  vtcfl |= vnet_buffer (p)->ip.flow_hash & 0x000fffff;
123 
124  return (clib_host_to_net_u32 (vtcfl));
125 }
126 
/* Optionally bypass the ip6-lookup node by writing a pre-resolved
 * adjacency into the buffer.  Returns true when the bypass applies
 * (MAP_SKIP_IP6_LOOKUP builds only), false otherwise.
 * NOTE(review): the signature (orig. 127-128), the enabling condition
 * (orig. 131) and the right-hand side of the adj_index assignment
 * (orig. 134, presumably the pre_resolved dpo index) are missing from
 * this extraction — restore from the upstream file. */
129 {
130 #ifdef MAP_SKIP_IP6_LOOKUP
132  {
133  vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
135  return (true);
136  }
137 #endif
138  return (false);
139 }
140 
141 /*
142  * ip4_map_ttl
143  */
/* Decrement the inner IPv4 TTL and incrementally patch the header
 * checksum (add 0x0100 with end-around carry, per the RFC 1624 style
 * update for a TTL decrement).  Sets *error to IP4_ERROR_TIME_EXPIRED
 * when the TTL reaches zero.
 * NOTE(review): the parameter line (orig. 145) is missing; per the
 * cross-reference the signature is:
 * static void ip4_map_decrement_ttl (ip4_header_t * ip, u8 * error). */
144 static inline void
146 {
147  i32 ttl = ip->ttl;
148 
149  /* Input node should have rejected packets with ttl 0. */
150  ASSERT (ip->ttl > 0);
151 
152  u32 checksum = ip->checksum + clib_host_to_net_u16 (0x0100);
153  checksum += checksum >= 0xffff;
154  ip->checksum = checksum;
155  ttl -= 1;
156  ip->ttl = ttl;
157  *error = ttl <= 0 ? IP4_ERROR_TIME_EXPIRED : *error;
158 
159  /* Verify checksum. */
160  ASSERT (ip->checksum == ip4_header_checksum (ip));
161 }
162 
/* Choose how to fragment an encapsulated packet that exceeds the domain
 * MTU.  With frag_inner set, the inner IPv4 packet is fragmented and each
 * piece re-encapsulated (ip4-frag next); otherwise the outer IPv6 packet
 * is fragmented (ip6-frag next), unless the inner DF bit is set and
 * frag_ignore_df is off, in which case an ICMPv4 "fragmentation needed"
 * error is generated and MAP_ERROR_DF_SET reported.
 * NOTE(review): the trailing arguments of both ip_frag_set_vnet_buffer
 * calls (orig. 171-172 and 186-187) are missing from this extraction. */
164 static u32
165 ip4_map_fragment (vlib_buffer_t * b, u16 mtu, bool df, u8 * error)
166 {
167  map_main_t *mm = &map_main;
168 
169  if (mm->frag_inner)
170  {
171  ip_frag_set_vnet_buffer (b, sizeof (ip6_header_t), mtu,
173  return (IP4_MAP_NEXT_IP4_FRAGMENT);
174  }
175  else
176  {
177  if (df && !mm->frag_ignore_df)
178  {
179  icmp4_error_set_vnet_buffer (b, ICMP4_destination_unreachable,
180  ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
181  mtu);
182  vlib_buffer_advance (b, sizeof (ip6_header_t));
183  *error = MAP_ERROR_DF_SET;
184  return (IP4_MAP_NEXT_ICMP_ERROR);
185  }
188  return (IP4_MAP_NEXT_IP6_FRAGMENT);
189  }
190 }
191 
192 /*
193  * ip4_map
194  */
/* Main MAP-E encapsulation node function.  For each IPv4 packet: look up
 * the MAP domain (index stashed in adj_index[VLIB_TX] by the input
 * feature), extract/check the L4 port, decrement TTL, compute the MAP
 * IPv6 destination from (dst addr, port), prepend an IPv6 header, then
 * dispatch to ip6-lookup / ip6-rewrite / fragmentation / reassembly /
 * icmp-error / drop.  Standard vlib dual-loop + single-loop structure.
 * NOTE(review): this Doxygen extraction dropped a number of lines
 * (orig. 196 signature, 200, 205, 224, 268, 271, 298-300, 331, 334-339
 * partially, 363, 366, 409, 432, 449, 474, 477): notably the function
 * signature, the error_node lookup argument, the per-domain combined
 * counter (vlib_increment_combined_counter) call heads, the DF-bit test
 * expressions, and the ip4_map_ip6_lookup_bypass call heads.  Restore
 * from the upstream file; do not hand-reconstruct. */
195 static uword
197 {
198  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
199  vlib_node_runtime_t *error_node =
201  from = vlib_frame_vector_args (frame);
202  n_left_from = frame->n_vectors;
203  next_index = node->cached_next_index;
204  map_main_t *mm = &map_main;
206  u32 thread_index = vm->thread_index;
207 
208  while (n_left_from > 0)
209  {
210  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
211 
212  /* Dual loop */
213  while (n_left_from >= 4 && n_left_to_next >= 2)
214  {
215  u32 pi0, pi1;
216  vlib_buffer_t *p0, *p1;
217  map_domain_t *d0, *d1;
218  u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
219  ip4_header_t *ip40, *ip41;
220  u16 port0 = 0, port1 = 0;
221  ip6_header_t *ip6h0, *ip6h1;
222  u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
223  u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 =
225 
226  /* Prefetch next iteration. */
227  {
228  vlib_buffer_t *p2, *p3;
229 
230  p2 = vlib_get_buffer (vm, from[2]);
231  p3 = vlib_get_buffer (vm, from[3]);
232 
233  vlib_prefetch_buffer_header (p2, STORE);
234  vlib_prefetch_buffer_header (p3, STORE);
235  /* IPv4 + 8 = 28. possibly plus -40 */
236  CLIB_PREFETCH (p2->data - 40, 68, STORE);
237  CLIB_PREFETCH (p3->data - 40, 68, STORE);
238  }
239 
240  pi0 = to_next[0] = from[0];
241  pi1 = to_next[1] = from[1];
242  from += 2;
243  n_left_from -= 2;
244  to_next += 2;
245  n_left_to_next -= 2;
246 
247  p0 = vlib_get_buffer (vm, pi0);
248  p1 = vlib_get_buffer (vm, pi1);
249  ip40 = vlib_buffer_get_current (p0);
250  ip41 = vlib_buffer_get_current (p1);
251  map_domain_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
252  d0 = ip4_map_get_domain (map_domain_index0);
253  map_domain_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
254  d1 = ip4_map_get_domain (map_domain_index1);
255  ASSERT (d0);
256  ASSERT (d1);
257 
258  /*
259  * Shared IPv4 address
260  */
261  port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
262  port1 = ip4_map_port_and_security_check (d1, ip41, &next1, &error1);
263 
264  /* Decrement IPv4 TTL */
265  ip4_map_decrement_ttl (ip40, &error0);
266  ip4_map_decrement_ttl (ip41, &error1);
267  bool df0 =
269  clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
270  bool df1 =
272  clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
273 
274  /* MAP calc */
275  u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
276  u32 da41 = clib_net_to_host_u32 (ip41->dst_address.as_u32);
277  u16 dp40 = clib_net_to_host_u16 (port0);
278  u16 dp41 = clib_net_to_host_u16 (port1);
279  u64 dal60 = map_get_pfx (d0, da40, dp40);
280  u64 dal61 = map_get_pfx (d1, da41, dp41);
281  u64 dar60 = map_get_sfx (d0, da40, dp40);
282  u64 dar61 = map_get_sfx (d1, da41, dp41);
/* All-zero prefix+suffix means no MAP rule matched this
 * (address, port) — flag NO_BINDING unless headed to reass. */
283  if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
284  && next0 != IP4_MAP_NEXT_REASS)
285  error0 = MAP_ERROR_NO_BINDING;
286  if (dal61 == 0 && dar61 == 0 && error1 == MAP_ERROR_NONE
287  && next1 != IP4_MAP_NEXT_REASS)
288  error1 = MAP_ERROR_NO_BINDING;
289 
290  /* construct ipv6 header */
291  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
292  vlib_buffer_advance (p1, -sizeof (ip6_header_t));
293  ip6h0 = vlib_buffer_get_current (p0);
294  ip6h1 = vlib_buffer_get_current (p1);
295  vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
296  vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
297 
299  ip4_map_vtcfl (ip40, p0);
301  ip4_map_vtcfl (ip41, p1);
302  ip6h0->payload_length = ip40->length;
303  ip6h1->payload_length = ip41->length;
304  ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
305  ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
306  ip6h0->hop_limit = 0x40;
307  ip6h1->hop_limit = 0x40;
308  ip6h0->src_address = d0->ip6_src;
309  ip6h1->src_address = d1->ip6_src;
310  ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
311  ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
312  ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64 (dal61);
313  ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64 (dar61);
314 
315  /*
316  * Determine next node. Can be one of:
317  * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
318  */
319  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
320  {
321  if (PREDICT_FALSE
322  (d0->mtu
323  && (clib_net_to_host_u16 (ip6h0->payload_length) +
324  sizeof (*ip6h0) > d0->mtu)))
325  {
326  next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
327  }
328  else
329  {
330  next0 =
332  ip40) ?
333  IP4_MAP_NEXT_IP6_REWRITE : next0;
335  thread_index,
336  map_domain_index0, 1,
337  clib_net_to_host_u16
338  (ip6h0->payload_length) +
339  40);
340  }
341  }
342  else
343  {
344  next0 = IP4_MAP_NEXT_DROP;
345  }
346 
347  /*
348  * Determine next node. Can be one of:
349  * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
350  */
351  if (PREDICT_TRUE (error1 == MAP_ERROR_NONE))
352  {
353  if (PREDICT_FALSE
354  (d1->mtu
355  && (clib_net_to_host_u16 (ip6h1->payload_length) +
356  sizeof (*ip6h1) > d1->mtu)))
357  {
358  next1 = ip4_map_fragment (p1, d1->mtu, df1, &error1);
359  }
360  else
361  {
362  next1 =
364  ip41) ?
365  IP4_MAP_NEXT_IP6_REWRITE : next1;
367  thread_index,
368  map_domain_index1, 1,
369  clib_net_to_host_u16
370  (ip6h1->payload_length) +
371  40);
372  }
373  }
374  else
375  {
376  next1 = IP4_MAP_NEXT_DROP;
377  }
378 
379  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
380  {
381  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
382  tr->map_domain_index = map_domain_index0;
383  tr->port = port0;
384  }
385  if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
386  {
387  map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
388  tr->map_domain_index = map_domain_index1;
389  tr->port = port1;
390  }
391 
392  p0->error = error_node->errors[error0];
393  p1->error = error_node->errors[error1];
394 
395  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
396  n_left_to_next, pi0, pi1, next0,
397  next1);
398  }
399 
/* Single loop: same per-packet processing as the dual loop above. */
400  while (n_left_from > 0 && n_left_to_next > 0)
401  {
402  u32 pi0;
403  vlib_buffer_t *p0;
404  map_domain_t *d0;
405  u8 error0 = MAP_ERROR_NONE;
406  ip4_header_t *ip40;
407  u16 port0 = 0;
408  ip6_header_t *ip6h0;
410  u32 map_domain_index0 = ~0;
411 
412  pi0 = to_next[0] = from[0];
413  from += 1;
414  n_left_from -= 1;
415  to_next += 1;
416  n_left_to_next -= 1;
417 
418  p0 = vlib_get_buffer (vm, pi0);
419  ip40 = vlib_buffer_get_current (p0);
420  map_domain_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
421  d0 = ip4_map_get_domain (map_domain_index0);
422  ASSERT (d0);
423 
424  /*
425  * Shared IPv4 address
426  */
427  port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
428 
429  /* Decrement IPv4 TTL */
430  ip4_map_decrement_ttl (ip40, &error0);
431  bool df0 =
433  clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
434 
435  /* MAP calc */
436  u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
437  u16 dp40 = clib_net_to_host_u16 (port0);
438  u64 dal60 = map_get_pfx (d0, da40, dp40);
439  u64 dar60 = map_get_sfx (d0, da40, dp40);
440  if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
441  && next0 != IP4_MAP_NEXT_REASS)
442  error0 = MAP_ERROR_NO_BINDING;
443 
444  /* construct ipv6 header */
445  vlib_buffer_advance (p0, -(sizeof (ip6_header_t)));
446  ip6h0 = vlib_buffer_get_current (p0);
447  vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
448 
450  ip4_map_vtcfl (ip40, p0);
451  ip6h0->payload_length = ip40->length;
452  ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
453  ip6h0->hop_limit = 0x40;
454  ip6h0->src_address = d0->ip6_src;
455  ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
456  ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
457 
458  /*
459  * Determine next node. Can be one of:
460  * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
461  */
462  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
463  {
464  if (PREDICT_FALSE
465  (d0->mtu
466  && (clib_net_to_host_u16 (ip6h0->payload_length) +
467  sizeof (*ip6h0) > d0->mtu)))
468  {
469  next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
470  }
471  else
472  {
473  next0 =
475  ip40) ?
476  IP4_MAP_NEXT_IP6_REWRITE : next0;
478  thread_index,
479  map_domain_index0, 1,
480  clib_net_to_host_u16
481  (ip6h0->payload_length) +
482  40);
483  }
484  }
485  else
486  {
487  next0 = IP4_MAP_NEXT_DROP;
488  }
489 
490  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
491  {
492  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
493  tr->map_domain_index = map_domain_index0;
494  tr->port = port0;
495  }
496 
497  p0->error = error_node->errors[error0];
498  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
499  n_left_to_next, pi0, next0);
500  }
501  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
502  }
503 
504  return frame->n_vectors;
505 }
506 
507 /*
508  * ip4_map_reass
509  */
/* MAP "virtual reassembly" node.  Fragments arrive here already wearing
 * the outer IPv6 header built by ip4_map; the node tracks fragment chains
 * keyed by (src, dst, fragment_id, protocol) only to learn the L4 port
 * from the first fragment.  Fragments seen before the port is known are
 * cached (dequeued) and looped back into this node's input once the port
 * arrives; the node then just patches the IPv6 destination address.
 * NOTE(review): this extraction dropped several lines (orig. 511
 * signature tail, 516, 521, 539, 555-556 — the map_ip4_reass_get call
 * head declaring `r`, 610 — presumably map_ip4_reass_unlock, 628, 633,
 * 649 — the combined-counter call head, 677, 689).  Restore from the
 * upstream file; do not hand-reconstruct. */
510 static uword
512  vlib_node_runtime_t * node, vlib_frame_t * frame)
513 {
514  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
515  vlib_node_runtime_t *error_node =
517  from = vlib_frame_vector_args (frame);
518  n_left_from = frame->n_vectors;
519  next_index = node->cached_next_index;
520  map_main_t *mm = &map_main;
522  u32 thread_index = vm->thread_index;
523  u32 *fragments_to_drop = NULL;
524  u32 *fragments_to_loopback = NULL;
525 
526  while (n_left_from > 0)
527  {
528  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
529 
530  while (n_left_from > 0 && n_left_to_next > 0)
531  {
532  u32 pi0;
533  vlib_buffer_t *p0;
534  map_domain_t *d0;
535  u8 error0 = MAP_ERROR_NONE;
536  ip4_header_t *ip40;
537  i32 port0 = 0;
538  ip6_header_t *ip60;
540  u32 map_domain_index0;
541  u8 cached = 0;
542 
543  pi0 = to_next[0] = from[0];
544  from += 1;
545  n_left_from -= 1;
546  to_next += 1;
547  n_left_to_next -= 1;
548 
549  p0 = vlib_get_buffer (vm, pi0);
550  ip60 = vlib_buffer_get_current (p0);
551  ip40 = (ip4_header_t *) (ip60 + 1);
552  map_domain_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
553  d0 = ip4_map_get_domain (map_domain_index0);
554 
557  ip40->dst_address.as_u32,
558  ip40->fragment_id,
559  ip40->protocol,
560  &fragments_to_drop);
561  if (PREDICT_FALSE (!r))
562  {
563  // Could not create a caching entry
564  error0 = MAP_ERROR_FRAGMENT_MEMORY;
565  }
566  else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
567  {
568  if (r->port >= 0)
569  {
570  // We know the port already
571  port0 = r->port;
572  }
573  else if (map_ip4_reass_add_fragment (r, pi0))
574  {
575  // Not enough space for caching
576  error0 = MAP_ERROR_FRAGMENT_MEMORY;
577  map_ip4_reass_free (r, &fragments_to_drop);
578  }
579  else
580  {
581  cached = 1;
582  }
583  }
584  else if ((port0 = ip4_get_port (ip40, 0)) == 0)
585  {
586  // Could not find port. We'll free the reassembly.
587  error0 = MAP_ERROR_BAD_PROTOCOL;
588  port0 = 0;
589  map_ip4_reass_free (r, &fragments_to_drop);
590  }
591  else
592  {
// First fragment with a usable port: record it and release any
// cached later fragments back through the loopback vector.
593  r->port = port0;
594  map_ip4_reass_get_fragments (r, &fragments_to_loopback);
595  }
596 
597 #ifdef MAP_IP4_REASS_COUNT_BYTES
598  if (!cached && r)
599  {
600  r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
601  if (!ip4_get_fragment_more (ip40))
602  r->expected_total =
603  ip4_get_fragment_offset (ip40) * 8 +
604  clib_host_to_net_u16 (ip40->length) - 20;
605  if (r->forwarded >= r->expected_total)
606  map_ip4_reass_free (r, &fragments_to_drop);
607  }
608 #endif
609 
611 
612  // NOTE: Most operations have already been performed by ip4_map
613  // All we need is the right destination address
614  ip60->dst_address.as_u64[0] =
615  map_get_pfx_net (d0, ip40->dst_address.as_u32, port0);
616  ip60->dst_address.as_u64[1] =
617  map_get_sfx_net (d0, ip40->dst_address.as_u32, port0);
618 
619  if (PREDICT_FALSE
620  (d0->mtu
621  && (clib_net_to_host_u16 (ip60->payload_length) +
622  sizeof (*ip60) > d0->mtu)))
623  {
624  vnet_buffer (p0)->ip_frag.header_offset = sizeof (*ip60);
625  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
626  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
627  vnet_buffer (p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
629  }
630 
631  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
632  {
634  vlib_add_trace (vm, node, p0, sizeof (*tr));
635  tr->map_domain_index = map_domain_index0;
636  tr->port = port0;
637  tr->cached = cached;
638  }
639 
640  if (cached)
641  {
642  //Dequeue the packet
643  n_left_to_next++;
644  to_next--;
645  }
646  else
647  {
648  if (error0 == MAP_ERROR_NONE)
650  thread_index,
651  map_domain_index0, 1,
652  clib_net_to_host_u16
653  (ip60->payload_length) + 40);
654  next0 =
655  (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
656  p0->error = error_node->errors[error0];
657  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
658  n_left_to_next, pi0, next0);
659  }
660 
661  //Loopback when we reach the end of the input vector
662  if (n_left_from == 0 && vec_len (fragments_to_loopback))
663  {
664  from = vlib_frame_vector_args (frame);
665  u32 len = vec_len (fragments_to_loopback);
666  if (len <= VLIB_FRAME_SIZE)
667  {
668  clib_memcpy (from, fragments_to_loopback,
669  sizeof (u32) * len);
670  n_left_from = len;
671  vec_reset_length (fragments_to_loopback);
672  }
673  else
674  {
675  clib_memcpy (from,
676  fragments_to_loopback + (len -
678  sizeof (u32) * VLIB_FRAME_SIZE);
679  n_left_from = VLIB_FRAME_SIZE;
680  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
681  }
682  }
683  }
684  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
685  }
686 
687  map_send_all_to_node (vm, fragments_to_drop, node,
688  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
690 
691  vec_free (fragments_to_drop);
692  vec_free (fragments_to_loopback);
693  return frame->n_vectors;
694 }
695 
/* Error counter strings for both nodes, expanded from the MAP error list.
 * NOTE(review): original line 698 — the `foreach_map_error` invocation
 * between the #define and #undef — is missing from this extraction. */
696 static char *map_error_strings[] = {
697 #define _(sym,string) string,
699 #undef _
700 };
701 
/* *INDENT-OFF* */
/* Graph node registration for the ip4-map encapsulation node.
 * NOTE(review): original line 703 — the VLIB_REGISTER_NODE(ip4_map_node)
 * line opening this initializer — is missing from this extraction. */
704  .function = ip4_map,
705  .name = "ip4-map",
706  .vector_size = sizeof(u32),
707  .format_trace = format_map_trace,
708  .type = VLIB_NODE_TYPE_INTERNAL,
709 
710  .n_errors = MAP_N_ERROR,
711  .error_strings = map_error_strings,
712 
713  .n_next_nodes = IP4_MAP_N_NEXT,
714  .next_nodes = {
715  [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
716 #ifdef MAP_SKIP_IP6_LOOKUP
717  [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-load-balance",
718 #endif
719  [IP4_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
720  [IP4_MAP_NEXT_IP6_FRAGMENT] = "ip6-frag",
721  [IP4_MAP_NEXT_REASS] = "ip4-map-reass",
722  [IP4_MAP_NEXT_ICMP_ERROR] = "ip4-icmp-error",
723  [IP4_MAP_NEXT_DROP] = "error-drop",
724  },
725 };
726 /* *INDENT-ON* */
727 
/* *INDENT-OFF* */
/* Graph node registration for the ip4-map-reass virtual-reassembly node.
 * NOTE(review): original line 729 — the
 * VLIB_REGISTER_NODE(ip4_map_reass_node) line opening this initializer —
 * is missing from this extraction. */
730  .function = ip4_map_reass,
731  .name = "ip4-map-reass",
732  .vector_size = sizeof(u32),
733  .format_trace = format_ip4_map_reass_trace,
734  .type = VLIB_NODE_TYPE_INTERNAL,
735 
736  .n_errors = MAP_N_ERROR,
737  .error_strings = map_error_strings,
738 
739  .n_next_nodes = IP4_MAP_REASS_N_NEXT,
740  .next_nodes = {
741  [IP4_MAP_REASS_NEXT_IP6_LOOKUP] = "ip6-lookup",
742  [IP4_MAP_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
743  [IP4_MAP_REASS_NEXT_DROP] = "error-drop",
744  },
745 };
746 /* *INDENT-ON* */
747 
748 /*
749  * fd.io coding-style-patch-verification: ON
750  *
751  * Local Variables:
752  * eval: (c-set-style "gnu")
753  * End:
754  */
#define map_ip4_reass_lock()
Definition: map.h:477
u8 psid_length
Definition: map.h:97
#define CLIB_UNUSED(x)
Definition: clib.h:79
u8 * format_ip4_map_reass_trace(u8 *s, va_list *args)
Definition: ip4_map.c:56
map_main_t map_main
Definition: map.c:27
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:213
static_always_inline u64 map_get_pfx(map_domain_t *d, u32 addr, u16 port)
Definition: map.h:360
ip4_address_t src_address
Definition: ip4_packet.h:169
void ip_frag_set_vnet_buffer(vlib_buffer_t *b, u16 offset, u16 mtu, u8 next_index, u8 flags)
Definition: ip_frag.c:190
#define PREDICT_TRUE(x)
Definition: clib.h:106
u64 as_u64[2]
Definition: ip6_packet.h:51
unsigned long u64
Definition: types.h:89
dpo_id_t dpo
The Load-balance object index to use to forward.
Definition: map.h:220
#define NULL
Definition: clib.h:55
u8 tc
Definition: map.h:243
u32 thread_index
Definition: main.h:179
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:419
vlib_node_registration_t ip4_map_node
(constructor) VLIB_REGISTER_NODE (ip4_map_node)
Definition: ip4_map.c:703
u16 flags_and_fragment_offset
Definition: ip4_packet.h:150
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:451
ip6_address_t src_address
Definition: ip6_packet.h:347
unsigned char u8
Definition: types.h:56
IPv4 to IPv6 translation.
u16 port
Definition: map.h:338
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
bool tc_copy
Definition: map.h:244
#define static_always_inline
Definition: clib.h:93
ip4_address_t dst_address
Definition: ip4_packet.h:169
vlib_combined_counter_main_t * domain_counters
Definition: map.h:236
static int ip4_get_fragment_offset(ip4_header_t *i)
Definition: ip4_packet.h:199
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:184
static_always_inline void map_send_all_to_node(vlib_main_t *vm, u32 *pi_vector, vlib_node_runtime_t *node, vlib_error_t *error, u32 next)
Definition: map.h:566
int map_ip4_reass_add_fragment(map_ip4_reass_t *r, u32 pi)
Definition: map.c:1597
unsigned int u32
Definition: types.h:88
#define VLIB_FRAME_SIZE
Definition: node.h:364
static u16 ip4_get_port(ip4_header_t *ip, u8 sender)
Get TCP/UDP port number or ICMP id from IPv4 packet.
Definition: ip4_to_ip6.h:51
static u32 ip4_map_fragment(vlib_buffer_t *b, u16 mtu, bool df, u8 *error)
Definition: ip4_map.c:164
static uword ip4_map_reass(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_map.c:511
bool frag_ignore_df
Definition: map.h:281
static_always_inline u64 map_get_pfx_net(map_domain_t *d, u32 addr, u16 port)
Definition: map.h:374
unsigned short u16
Definition: types.h:57
ip4_map_reass_next_t
Definition: ip4_map.c:40
ip4_map_next_e
Definition: ip4_map.c:26
static_always_inline u64 map_get_sfx_net(map_domain_t *d, u32 addr, u16 port)
Definition: map.h:406
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:202
map_ip4_reass_t * map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id, u8 protocol, u32 **pi_to_drop)
Definition: map.c:1516
#define PREDICT_FALSE(x)
Definition: clib.h:105
static_always_inline u32 ip4_map_vtcfl(ip4_header_t *ip4, vlib_buffer_t *p)
Definition: ip4_map.c:116
static_always_inline map_domain_t * ip4_map_get_domain(u32 mdi)
Definition: map.h:425
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:364
u16 expected_total
Definition: map.h:134
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:135
vlib_node_registration_t ip4_map_reass_node
(constructor) VLIB_REGISTER_NODE (ip4_map_reass_node)
Definition: ip4_map.c:24
map_main_pre_resolved_t pre_resolved[FIB_PROTOCOL_MAX]
Pre-resolvd per-protocol global next-hops.
Definition: map.c:350
void map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop)
Definition: map.c:1473
static uword ip4_map(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_map.c:196
static_always_inline bool ip4_map_ip6_lookup_bypass(vlib_buffer_t *p0, ip4_header_t *ip)
Definition: ip4_map.c:128
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:153
u16 n_vectors
Definition: node.h:380
static_always_inline void map_ip4_reass_get_fragments(map_ip4_reass_t *r, u32 **pi)
Definition: map.h:481
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:77
vlib_main_t * vm
Definition: buffer.c:294
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:339
void icmp4_error_set_vnet_buffer(vlib_buffer_t *b, u8 type, u8 code, u32 data)
Definition: icmp4.c:431
u8 psid_offset
Definition: map.h:96
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:89
u16 forwarded
Definition: map.h:135
#define clib_memcpy(a, b, c)
Definition: string.h:75
static void ip4_map_decrement_ttl(ip4_header_t *ip, u8 *error)
Definition: ip4_map.c:145
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:454
#define foreach_map_error
Definition: map.h:308
ip6_address_t ip6_src
Definition: map.h:85
static char * map_error_strings[]
Definition: ip4_map.c:696
signed int i32
Definition: types.h:81
#define IP_FRAG_FLAG_IP6_HEADER
Definition: ip_frag.h:41
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:492
#define ASSERT(truth)
bool frag_inner
Definition: map.h:280
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:215
#define map_ip4_reass_unlock()
Definition: map.h:478
u8 * format_map_trace(u8 *s, va_list *args)
Definition: map.c:1436
static int ip4_get_fragment_more(ip4_header_t *i)
Definition: ip4_packet.h:205
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
struct _vlib_node_registration vlib_node_registration_t
u32 ip_version_traffic_class_and_flow_label
Definition: ip6_packet.h:334
Definition: defs.h:47
u16 mtu
Definition: map.h:91
u16 payload_length
Definition: ip6_packet.h:338
index_t dpoi_index
the index of objects of that type
Definition: dpo.h:184
#define FIB_NODE_INDEX_INVALID
Definition: fib_types.h:31
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
i32 port
Definition: map.h:137
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:267
A collection of combined counters.
Definition: counter.h:181
#define vnet_buffer(b)
Definition: buffer.h:360
u8 data[0]
Packet data.
Definition: buffer.h:172
#define IP4_HEADER_FLAG_DONT_FRAGMENT
Definition: ip4_packet.h:152
static_always_inline u64 map_get_sfx(map_domain_t *d, u32 addr, u16 port)
Definition: map.h:384
u32 map_domain_index
Definition: map.h:337
u8 ip_version_and_header_length
Definition: ip4_packet.h:137
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:111
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:57
static u16 ip4_header_checksum(ip4_header_t *i)
Definition: ip4_packet.h:246
static_always_inline u16 ip4_map_port_and_security_check(map_domain_t *d, ip4_header_t *ip, u32 *next, u8 *error)
Definition: ip4_map.c:67
ip6_address_t dst_address
Definition: ip6_packet.h:347