FD.io VPP  v16.06
Vector Packet Processing
ip6_map.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include "../ip/ip_frag.h"
18 
19 enum ip6_map_next_e {
20  IP6_MAP_NEXT_IP4_LOOKUP,
21 #ifdef MAP_SKIP_IP6_LOOKUP
22  IP6_MAP_NEXT_IP4_REWRITE,
23 #endif
24  IP6_MAP_NEXT_IP6_REASS,
25  IP6_MAP_NEXT_IP4_REASS,
26  IP6_MAP_NEXT_IP4_FRAGMENT,
27  IP6_MAP_NEXT_IP6_ICMP_RELAY,
28  IP6_MAP_NEXT_IP6_LOCAL,
29  IP6_MAP_NEXT_DROP,
30  IP6_MAP_NEXT_ICMP,
31  IP6_MAP_N_NEXT,
32 };
33 
34 enum ip6_map_ip6_reass_next_e {
35  IP6_MAP_IP6_REASS_NEXT_IP6_MAP,
36  IP6_MAP_IP6_REASS_NEXT_DROP,
37  IP6_MAP_IP6_REASS_N_NEXT,
38 };
39 
40 enum ip6_map_ip4_reass_next_e {
41  IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP,
42  IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT,
43  IP6_MAP_IP4_REASS_NEXT_DROP,
44  IP6_MAP_IP4_REASS_N_NEXT,
45 };
46 
47 enum ip6_icmp_relay_next_e {
48  IP6_ICMP_RELAY_NEXT_IP4_LOOKUP,
49  IP6_ICMP_RELAY_NEXT_DROP,
50  IP6_ICMP_RELAY_N_NEXT,
51 };
52 
53 vlib_node_registration_t ip6_map_ip4_reass_node;
54 vlib_node_registration_t ip6_map_ip6_reass_node;
55 static vlib_node_registration_t ip6_map_icmp_relay_node;
56 
57 typedef struct {
58  u32 map_domain_index;
59  u16 port;
60  u8 cached;
61 } map_ip6_map_ip4_reass_trace_t;
62 
63 u8 *
64 format_ip6_map_ip4_reass_trace (u8 *s, va_list *args)
65 {
66  CLIB_UNUSED(vlib_main_t *vm) = va_arg (*args, vlib_main_t *);
67  CLIB_UNUSED(vlib_node_t *node) = va_arg (*args, vlib_node_t *);
68  map_ip6_map_ip4_reass_trace_t *t = va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
69  return format(s, "MAP domain index: %d L4 port: %u Status: %s", t->map_domain_index,
70  t->port, t->cached?"cached":"forwarded");
71 }
72 
73 typedef struct {
74  u16 offset;
75  u16 frag_len;
76  u8 out;
77 } map_ip6_map_ip6_reass_trace_t;
78 
79 u8 *
80 format_ip6_map_ip6_reass_trace (u8 *s, va_list *args)
81 {
82  CLIB_UNUSED(vlib_main_t *vm) = va_arg (*args, vlib_main_t *);
83  CLIB_UNUSED(vlib_node_t *node) = va_arg (*args, vlib_node_t *);
84  map_ip6_map_ip6_reass_trace_t *t = va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
85  return format(s, "Offset: %d Fragment length: %d Status: %s", t->offset, t->frag_len, t->out?"out":"in");
86 }
87 
88 /*
89  * ip6_map_sec_check
90  */
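/*
 * Inbound spoofing check: recompute the expected IPv6 source address
 * (rule prefix + suffix) from the MAP domain, the inner IPv4 source
 * address and the source port, and compare it against the actual IPv6
 * source address of the encapsulating header.
 */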
91 static_always_inline bool
92 ip6_map_sec_check (map_domain_t *d, u16 port, ip4_header_t *ip4, ip6_header_t *ip6)
93 {
94  u16 sp4 = clib_net_to_host_u16(port);
95  u32 sa4 = clib_net_to_host_u32(ip4->src_address.as_u32);
96  u64 sal6 = map_get_pfx(d, sa4, sp4);
97  u64 sar6 = map_get_sfx(d, sa4, sp4);
98 
99  if (PREDICT_FALSE(sal6 != clib_net_to_host_u64(ip6->src_address.as_u64[0]) ||
100  sar6 != clib_net_to_host_u64(ip6->src_address.as_u64[1])))
101  return (false);
102  return (true);
103 }
104 
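/*
 * Per-packet decapsulation security check.  Only meaningful for shared-address
 * domains (ea_bits_len or rules set and psid_length > 0).  Non-fragmented inner
 * packets are checked directly when sec_check is enabled; fragments are diverted
 * to IPv4 virtual reassembly when sec_check_frag is enabled.
 */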
105 static_always_inline void
106 ip6_map_security_check (map_domain_t *d, ip4_header_t *ip4, ip6_header_t *ip6, u32 *next, u8 *error)
107 {
108  map_main_t *mm = &map_main;
109  if (d->ea_bits_len || d->rules) {
110  if (d->psid_length > 0) {
111  if (!ip4_is_fragment(ip4)) {
112  u16 port = ip4_map_get_port(ip4, MAP_SENDER);
113  if (port) {
114  if (mm->sec_check)
115  *error = ip6_map_sec_check(d, port, ip4, ip6) ? MAP_ERROR_NONE : MAP_ERROR_DECAP_SEC_CHECK;
116  } else {
117  *error = MAP_ERROR_BAD_PROTOCOL;
118  }
119  } else {
120  *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
121  }
122  }
123  }
124 }
125 
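/*
 * Optional FIB-lookup bypass for decapsulated IPv4 packets.  When
 * MAP_SKIP_IP6_LOOKUP is built in and a pre-resolved adjacency is configured
 * (adj4_index), the TX adjacency is set directly, spreading across multi-path
 * adjacencies with the IPv4 flow hash, so the ip4-lookup node can be skipped.
 */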
126 static_always_inline bool
127 ip6_map_ip4_lookup_bypass (vlib_buffer_t *p0, ip4_header_t *ip)
128 {
129 #ifdef MAP_SKIP_IP6_LOOKUP
130  map_main_t *mm = &map_main;
131  u32 adj_index0 = mm->adj4_index;
132  if (adj_index0 > 0) {
133  ip_lookup_main_t *lm4 = &ip4_main.lookup_main;
134  ip_adjacency_t *adj = ip_get_adjacency(lm4, mm->adj4_index);
135  if (adj->n_adj > 1) {
136  u32 hash_c0 = ip4_compute_flow_hash(ip, IP_FLOW_HASH_DEFAULT);
137  adj_index0 += (hash_c0 & (adj->n_adj - 1));
138  }
139  vnet_buffer(p0)->ip.adj_index[VLIB_TX] = adj_index0;
140  return (true);
141  }
142 #endif
143  return (false);
144 }
145 
146 /*
147  * ip6_map
148  */
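/*
 * Main MAP-E decapsulation node (ip6-map).  Strips the outer IPv6 header,
 * classifies the inner packet (see the comment inside the loops below) and
 * hands it to ip4-lookup/ip4-frag, the reassembly nodes, the ICMP relay,
 * IPv6 local, or drop.
 */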
149 static uword
150 ip6_map (vlib_main_t *vm,
151  vlib_node_runtime_t *node,
152  vlib_frame_t *frame)
153 {
154  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
155  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_node.index);
156  map_main_t *mm = &map_main;
157  vlib_combined_counter_main_t *cm = map_main.domain_counters;
158  u32 cpu_index = os_get_cpu_number();
159 
160  from = vlib_frame_vector_args(frame);
161  n_left_from = frame->n_vectors;
162  next_index = node->cached_next_index;
163  while (n_left_from > 0) {
164  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
165 
166  /* Dual loop */
167  while (n_left_from >= 4 && n_left_to_next >= 2) {
168  u32 pi0, pi1;
169  vlib_buffer_t *p0, *p1;
170  u8 error0 = MAP_ERROR_NONE;
171  u8 error1 = MAP_ERROR_NONE;
172  map_domain_t *d0 = 0, *d1 = 0;
173  ip4_header_t *ip40, *ip41;
174  ip6_header_t *ip60, *ip61;
175  u16 port0 = 0, port1 = 0;
176  u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
177  u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
178  u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP;
179 
180  /* Prefetch next iteration. */
181  {
182  vlib_buffer_t *p2, *p3;
183 
184  p2 = vlib_get_buffer(vm, from[2]);
185  p3 = vlib_get_buffer(vm, from[3]);
186 
187  vlib_prefetch_buffer_header(p2, LOAD);
188  vlib_prefetch_buffer_header(p3, LOAD);
189 
190  /* IPv6 + IPv4 header + 8 bytes of ULP */
191  CLIB_PREFETCH(p2->data, 68, LOAD);
192  CLIB_PREFETCH(p3->data, 68, LOAD);
193  }
194 
195  pi0 = to_next[0] = from[0];
196  pi1 = to_next[1] = from[1];
197  from += 2;
198  n_left_from -= 2;
199  to_next +=2;
200  n_left_to_next -= 2;
201 
202  p0 = vlib_get_buffer(vm, pi0);
203  p1 = vlib_get_buffer(vm, pi1);
204  ip60 = vlib_buffer_get_current(p0);
205  ip61 = vlib_buffer_get_current(p1);
206  vlib_buffer_advance(p0, sizeof(ip6_header_t));
207  vlib_buffer_advance(p1, sizeof(ip6_header_t));
208  ip40 = vlib_buffer_get_current(p0);
209  ip41 = vlib_buffer_get_current(p1);
210 
211  /*
212  * Encapsulated IPv4 packet
213  * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
214  * - Lookup/Rewrite or Fragment node in case of packet > MTU
215  * Fragmented IPv6 packet
216  * ICMP IPv6 packet
217  * - Error -> Pass to ICMPv6/ICMPv4 relay
218  * - Info -> Pass to IPv6 local
219  * Anything else -> drop
220  */
221  if (PREDICT_TRUE(ip60->protocol == IP_PROTOCOL_IP_IN_IP && clib_net_to_host_u16(ip60->payload_length) > 20)) {
222  d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], (ip4_address_t *)&ip40->src_address.as_u32,
223  &map_domain_index0, &error0);
224  } else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
225  clib_net_to_host_u16(ip60->payload_length) > sizeof(icmp46_header_t)) {
226  icmp46_header_t *icmp = (void *)(ip60 + 1);
227  next0 = (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply) ?
228  IP6_MAP_NEXT_IP6_LOCAL : IP6_MAP_NEXT_IP6_ICMP_RELAY;
229  } else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) {
230  next0 = IP6_MAP_NEXT_IP6_REASS;
231  } else {
232  error0 = MAP_ERROR_BAD_PROTOCOL;
233  }
234  if (PREDICT_TRUE(ip61->protocol == IP_PROTOCOL_IP_IN_IP && clib_net_to_host_u16(ip61->payload_length) > 20)) {
235  d1 = ip6_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX], (ip4_address_t *)&ip41->src_address.as_u32,
236  &map_domain_index1, &error1);
237  } else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
238  clib_net_to_host_u16(ip61->payload_length) > sizeof(icmp46_header_t)) {
239  icmp46_header_t *icmp = (void *)(ip61 + 1);
240  next1 = (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply) ?
241  IP6_MAP_NEXT_IP6_LOCAL : IP6_MAP_NEXT_IP6_ICMP_RELAY;
242  } else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) {
243  next1 = IP6_MAP_NEXT_IP6_REASS;
244  } else {
245  error1 = MAP_ERROR_BAD_PROTOCOL;
246  }
247 
248  if (d0) {
249  /* MAP inbound security check */
250  ip6_map_security_check(d0, ip40, ip60, &next0, &error0);
251 
252  if (PREDICT_TRUE(error0 == MAP_ERROR_NONE &&
253  next0 == IP6_MAP_NEXT_IP4_LOOKUP)) {
254  if (PREDICT_FALSE(d0->mtu && (clib_host_to_net_u16(ip40->length) > d0->mtu))) {
255  vnet_buffer(p0)->ip_frag.header_offset = 0;
256  vnet_buffer(p0)->ip_frag.flags = 0;
257  vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
258  vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
259  next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
260  } else {
261  next0 = ip6_map_ip4_lookup_bypass(p0, ip40) ? IP6_MAP_NEXT_IP4_REWRITE : next0;
262  }
263  vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index, map_domain_index0, 1,
264  clib_net_to_host_u16(ip40->length));
265  }
266  }
267  if (d1) {
268  /* MAP inbound security check */
269  ip6_map_security_check(d1, ip41, ip61, &next1, &error1);
270 
271  if (PREDICT_TRUE(error1 == MAP_ERROR_NONE &&
272  next1 == IP6_MAP_NEXT_IP4_LOOKUP)) {
273  if (PREDICT_FALSE(d1->mtu && (clib_host_to_net_u16(ip41->length) > d1->mtu))) {
274  vnet_buffer(p1)->ip_frag.header_offset = 0;
275  vnet_buffer(p1)->ip_frag.flags = 0;
276  vnet_buffer(p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
277  vnet_buffer(p1)->ip_frag.mtu = d1->mtu;
278  next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
279  } else {
280  next1 = ip6_map_ip4_lookup_bypass(p1, ip41) ? IP6_MAP_NEXT_IP4_REWRITE : next1;
281  }
282  vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index, map_domain_index1, 1,
283  clib_net_to_host_u16(ip41->length));
284  }
285  }
286 
287  if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
288  map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
289  tr->map_domain_index = map_domain_index0;
290  tr->port = port0;
291  }
292 
293  if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED)) {
294  map_trace_t *tr = vlib_add_trace(vm, node, p1, sizeof(*tr));
295  tr->map_domain_index = map_domain_index1;
296  tr->port = port1;
297  }
298 
299  if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled) {
300  /* Set ICMP parameters */
301  vlib_buffer_advance(p0, -sizeof(ip6_header_t));
302  icmp6_error_set_vnet_buffer(p0, ICMP6_destination_unreachable,
303  ICMP6_destination_unreachable_source_address_failed_policy, 0);
304  next0 = IP6_MAP_NEXT_ICMP;
305  } else {
306  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
307  }
308 
309  if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled) {
310  /* Set ICMP parameters */
311  vlib_buffer_advance(p1, -sizeof(ip6_header_t));
312  icmp6_error_set_vnet_buffer(p1, ICMP6_destination_unreachable,
313  ICMP6_destination_unreachable_source_address_failed_policy, 0);
314  next1 = IP6_MAP_NEXT_ICMP;
315  } else {
316  next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
317  }
318 
319  /* Reset packet */
320  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
321  vlib_buffer_advance(p0, -sizeof(ip6_header_t));
322  if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
323  vlib_buffer_advance(p1, -sizeof(ip6_header_t));
324 
325  p0->error = error_node->errors[error0];
326  p1->error = error_node->errors[error1];
327  vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, pi0, pi1, next0, next1);
328  }
329 
330  /* Single loop */
331  while (n_left_from > 0 && n_left_to_next > 0) {
332  u32 pi0;
333  vlib_buffer_t *p0;
334  u8 error0 = MAP_ERROR_NONE;
335  map_domain_t *d0 = 0;
336  ip4_header_t *ip40;
337  ip6_header_t *ip60;
338  i32 port0 = 0;
339  u32 map_domain_index0 = ~0;
340  u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
341 
342  pi0 = to_next[0] = from[0];
343  from += 1;
344  n_left_from -= 1;
345  to_next +=1;
346  n_left_to_next -= 1;
347 
348  p0 = vlib_get_buffer(vm, pi0);
349  ip60 = vlib_buffer_get_current(p0);
350  vlib_buffer_advance(p0, sizeof(ip6_header_t));
351  ip40 = vlib_buffer_get_current(p0);
352 
353  /*
354  * Encapsulated IPv4 packet
355  * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
356  * - Lookup/Rewrite or Fragment node in case of packet > MTU
357  * Fragmented IPv6 packet
358  * ICMP IPv6 packet
359  * - Error -> Pass to ICMPv6/ICMPv4 relay
360  * - Info -> Pass to IPv6 local
361  * Anything else -> drop
362  */
363  if (PREDICT_TRUE(ip60->protocol == IP_PROTOCOL_IP_IN_IP && clib_net_to_host_u16(ip60->payload_length) > 20)) {
364  d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], (ip4_address_t *)&ip40->src_address.as_u32,
365  &map_domain_index0, &error0);
366  } else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
367  clib_net_to_host_u16(ip60->payload_length) > sizeof(icmp46_header_t)) {
368  icmp46_header_t *icmp = (void *)(ip60 + 1);
369  next0 = (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply) ?
370  IP6_MAP_NEXT_IP6_LOCAL : IP6_MAP_NEXT_IP6_ICMP_RELAY;
371  } else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
372  (((ip6_frag_hdr_t *)(ip60+1))->next_hdr == IP_PROTOCOL_IP_IN_IP)) {
373  next0 = IP6_MAP_NEXT_IP6_REASS;
374  } else {
375  error0 = MAP_ERROR_BAD_PROTOCOL;
376  }
377 
378  if (d0) {
379  /* MAP inbound security check */
380  ip6_map_security_check(d0, ip40, ip60, &next0, &error0);
381 
382  if (PREDICT_TRUE(error0 == MAP_ERROR_NONE &&
383  next0 == IP6_MAP_NEXT_IP4_LOOKUP)) {
384  if (PREDICT_FALSE(d0->mtu && (clib_host_to_net_u16(ip40->length) > d0->mtu))) {
385  vnet_buffer(p0)->ip_frag.header_offset = 0;
386  vnet_buffer(p0)->ip_frag.flags = 0;
387  vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
388  vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
389  next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
390  } else {
391  next0 = ip6_map_ip4_lookup_bypass(p0, ip40) ? IP6_MAP_NEXT_IP4_REWRITE : next0;
392  }
393  vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index, map_domain_index0, 1,
394  clib_net_to_host_u16(ip40->length));
395  }
396  }
397 
398  if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
399  map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
400  tr->map_domain_index = map_domain_index0;
401  tr->port = (u16)port0;
402  }
403 
404  if (mm->icmp6_enabled &&
405  (error0 == MAP_ERROR_DECAP_SEC_CHECK || error0 == MAP_ERROR_NO_DOMAIN)) {
406  /* Set ICMP parameters */
407  vlib_buffer_advance(p0, -sizeof(ip6_header_t));
408  icmp6_error_set_vnet_buffer(p0, ICMP6_destination_unreachable,
409  ICMP6_destination_unreachable_source_address_failed_policy, 0);
410  next0 = IP6_MAP_NEXT_ICMP;
411  } else {
412  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
413  }
414 
415  /* Reset packet */
416  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
417  vlib_buffer_advance(p0, -sizeof(ip6_header_t));
418 
419  p0->error = error_node->errors[error0];
420  vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
421  }
422  vlib_put_next_frame(vm, node, next_index, n_left_to_next);
423  }
424 
425  return frame->n_vectors;
426 }
427 
428 
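/*
 * Walk the IPv6 reassembly context and emit every fragment that can already
 * be forwarded: rebuild the inner IPv4 header (copying it onto non-first
 * fragments), translate the IPv6 fragment offset/MF information into the
 * IPv4 header, and strip the IPv6 fragment header.
 */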
429 static_always_inline void
430 ip6_map_ip6_reass_prepare(vlib_main_t *vm, vlib_node_runtime_t *node, map_ip6_reass_t *r,
431  u32 **fragments_ready, u32 **fragments_to_drop)
432 {
433  ip4_header_t *ip40;
434  ip6_header_t *ip60;
435  ip6_frag_hdr_t *frag0;
436  vlib_buffer_t *p0;
437 
438  if (!r->ip4_header.ip_version_and_header_length)
439  return;
440 
441  //The IP header is here, we need to check for packets
442  //that can be forwarded
443  int i;
444  for (i=0; i<MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) {
445  if (r->fragments[i].pi == ~0 ||
446  ((!r->fragments[i].next_data_len) && (r->fragments[i].next_data_offset != (0xffff))))
447  continue;
448 
449  p0 = vlib_get_buffer(vm, r->fragments[i].pi);
450  ip60 = vlib_buffer_get_current(p0);
451  frag0 = (ip6_frag_hdr_t *)(ip60 + 1);
452  ip40 = (ip4_header_t *)(frag0 + 1);
453 
454  if (ip6_frag_hdr_offset(frag0)) {
455  //Not first fragment, add the IPv4 header
456  clib_memcpy(ip40, &r->ip4_header, 20);
457  }
458 
459 #ifdef MAP_IP6_REASS_COUNT_BYTES
460  r->forwarded += clib_net_to_host_u16(ip60->payload_length) - sizeof(*frag0);
461 #endif
462 
463  if (ip6_frag_hdr_more(frag0)) {
464  //Not last fragment, we copy end of next
465  clib_memcpy(u8_ptr_add(vlib_buffer_get_current(p0), p0->current_length), r->fragments[i].next_data, 20);
466  p0->current_length += 20;
467  ip60->payload_length = u16_net_add(ip60->payload_length, 20);
468  }
469 
470  if (!ip4_is_fragment(ip40)) {
471  ip40->fragment_id = frag_id_6to4(frag0->identification);
472  ip40->flags_and_fragment_offset = clib_host_to_net_u16(ip6_frag_hdr_offset(frag0));
473  } else {
474  ip40->flags_and_fragment_offset = clib_host_to_net_u16(ip4_get_fragment_offset(ip40) + ip6_frag_hdr_offset(frag0));
475  }
476 
477  if (ip6_frag_hdr_more(frag0))
478  ip40->flags_and_fragment_offset |= clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
479 
480  ip40->length = clib_host_to_net_u16(p0->current_length - sizeof(*ip60) - sizeof(*frag0));
481  ip40->checksum = ip4_header_checksum(ip40);
482 
483  if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
484  map_ip6_map_ip6_reass_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
485  tr->offset = ip4_get_fragment_offset(ip40);
486  tr->frag_len = clib_net_to_host_u16(ip40->length) - sizeof(*ip40);
487  tr->out = 1;
488  }
489 
490  vec_add1(*fragments_ready, r->fragments[i].pi);
491  r->fragments[i].pi = ~0;
492  r->fragments[i].next_data_len = 0;
493  r->fragments[i].next_data_offset = 0;
495 
496  //TODO: Best solution would be that ip6_map handles extension headers
497  // and ignores atomic fragment. But in the meantime, let's just copy the header.
498 
499  u8 protocol = frag0->next_hdr;
500  memmove(u8_ptr_add(ip40, - sizeof(*ip60)), ip60, sizeof(*ip60));
501  ((ip6_header_t *)u8_ptr_add(ip40, - sizeof(*ip60)))->protocol = protocol;
502  vlib_buffer_advance(p0, sizeof(*frag0));
503  }
504 }
505 
506 void
507 map_ip6_drop_pi(u32 pi)
508 {
509  vlib_main_t *vm = vlib_get_main();
512 }
513 
514 void
515 map_ip4_drop_pi(u32 pi)
516 {
517  vlib_main_t *vm = vlib_get_main();
520 }
521 
522 /*
523  * ip6_reass
524  * TODO: We should count the number of successfully
525  * transmitted fragment bytes and compare that to the last fragment
526  * offset such that we can free the reassembly structure when all fragments
527  * have been forwarded.
528  */
529 static uword
530 ip6_map_ip6_reass (vlib_main_t *vm,
531  vlib_node_runtime_t *node,
532  vlib_frame_t *frame)
533 {
534  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
535  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_ip6_reass_node.index);
536  u32 *fragments_to_drop = NULL;
537  u32 *fragments_ready = NULL;
538 
539  from = vlib_frame_vector_args(frame);
540  n_left_from = frame->n_vectors;
541  next_index = node->cached_next_index;
542  while (n_left_from > 0) {
543  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
544 
545  /* Single loop */
546  while (n_left_from > 0 && n_left_to_next > 0) {
547  u32 pi0;
548  vlib_buffer_t *p0;
549  u8 error0 = MAP_ERROR_NONE;
550  ip6_header_t *ip60;
551  ip6_frag_hdr_t *frag0;
552  u16 offset;
553  u16 next_offset;
554  u16 frag_len;
555 
556  pi0 = to_next[0] = from[0];
557  from += 1;
558  n_left_from -= 1;
559  to_next +=1;
560  n_left_to_next -= 1;
561 
562  p0 = vlib_get_buffer(vm, pi0);
563  ip60 = vlib_buffer_get_current(p0);
564  frag0 = (ip6_frag_hdr_t *)(ip60 + 1);
565  offset = clib_host_to_net_u16(frag0->fragment_offset_and_more) & (~7);
566  frag_len = clib_net_to_host_u16(ip60->payload_length) - sizeof(*frag0);
567  next_offset = ip6_frag_hdr_more(frag0) ? (offset + frag_len) : (0xffff);
568 
569  //FIXME: Support other extension headers, maybe
570 
571  if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
572  map_ip6_map_ip6_reass_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
573  tr->offset = offset;
574  tr->frag_len = frag_len;
575  tr->out = 0;
576  }
577 
578  map_ip6_reass_lock();
579  map_ip6_reass_t *r = map_ip6_reass_get(&ip60->src_address, &ip60->dst_address,
580  frag0->identification, frag0->next_hdr, &fragments_to_drop);
581  //FIXME: Use better error codes
582  if (PREDICT_FALSE(!r)) {
583  // Could not create a caching entry
584  error0 = MAP_ERROR_FRAGMENT_MEMORY;
585  } else if (PREDICT_FALSE((frag_len <= 20 &&
586  (ip6_frag_hdr_more(frag0) || (!offset))))) {
587  //Very small fragments are only allowed as the last fragment and
588  //can never be the first one
589  error0 = MAP_ERROR_FRAGMENT_MALFORMED;
590  } else if (map_ip6_reass_add_fragment(r, pi0, offset, next_offset, (u8 *)(frag0 + 1), frag_len)) {
591  map_ip6_reass_free(r, &fragments_to_drop);
592  error0 = MAP_ERROR_FRAGMENT_MEMORY;
593  } else {
594 #ifdef MAP_IP6_REASS_COUNT_BYTES
595  if (!ip6_frag_hdr_more(frag0))
596  r->expected_total = offset + frag_len;
597 #endif
598  ip6_map_ip6_reass_prepare(vm, node, r, &fragments_ready, &fragments_to_drop);
599 #ifdef MAP_IP6_REASS_COUNT_BYTES
600  if(r->forwarded >= r->expected_total)
601  map_ip6_reass_free(r, &fragments_to_drop);
602 #endif
603  }
604  map_ip6_reass_unlock();
605 
606  if (error0 == MAP_ERROR_NONE) {
607  if (frag_len > 20) {
608  //Dequeue the packet
609  n_left_to_next++;
610  to_next--;
611  } else {
612  //All data from this packet was copied; no need to keep it, but this is not an error
613  p0->error = error_node->errors[MAP_ERROR_NONE];
614  vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, IP6_MAP_IP6_REASS_NEXT_DROP);
615  }
616  } else {
617  p0->error = error_node->errors[error0];
618  vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, IP6_MAP_IP6_REASS_NEXT_DROP);
619  }
620  }
621  vlib_put_next_frame(vm, node, next_index, n_left_to_next);
622  }
623 
624  map_send_all_to_node(vm, fragments_ready, node,
625  &error_node->errors[MAP_ERROR_NONE],
626  IP6_MAP_IP6_REASS_NEXT_IP6_MAP);
627  map_send_all_to_node(vm, fragments_to_drop, node,
628  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
629  IP6_MAP_IP6_REASS_NEXT_DROP);
630 
631  vec_free(fragments_to_drop);
632  vec_free(fragments_ready);
633  return frame->n_vectors;
634 }
635 
636 /*
637  * ip6_ip4_virt_reass
638  */
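/*
 * "Virtual" reassembly: fragments are never merged into a single buffer.
 * The node caches fragments only until the first fragment reveals the L4
 * port (needed for the MAP security check), then loops the cached fragments
 * back through this node and forwards each of them individually.
 */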
639 static uword
640 ip6_map_ip4_reass (vlib_main_t *vm,
641  vlib_node_runtime_t *node,
642  vlib_frame_t *frame)
643 {
644  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
645  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_ip4_reass_node.index);
646  map_main_t *mm = &map_main;
647  vlib_combined_counter_main_t *cm = map_main.domain_counters;
648  u32 cpu_index = os_get_cpu_number();
649  u32 *fragments_to_drop = NULL;
650  u32 *fragments_to_loopback = NULL;
651 
652  from = vlib_frame_vector_args(frame);
653  n_left_from = frame->n_vectors;
654  next_index = node->cached_next_index;
655  while (n_left_from > 0) {
656  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
657 
658  /* Single loop */
659  while (n_left_from > 0 && n_left_to_next > 0) {
660  u32 pi0;
661  vlib_buffer_t *p0;
662  u8 error0 = MAP_ERROR_NONE;
663  map_domain_t *d0;
664  ip4_header_t *ip40;
665  ip6_header_t *ip60;
666  i32 port0 = 0;
667  u32 map_domain_index0 = ~0;
668  u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP;
669  u8 cached = 0;
670 
671  pi0 = to_next[0] = from[0];
672  from += 1;
673  n_left_from -= 1;
674  to_next +=1;
675  n_left_to_next -= 1;
676 
677  p0 = vlib_get_buffer(vm, pi0);
678  ip40 = vlib_buffer_get_current(p0);
679  ip60 = ((ip6_header_t *)ip40) - 1;
680 
681  d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], (ip4_address_t *)&ip40->src_address.as_u32,
682  &map_domain_index0, &error0);
683 
684  map_ip4_reass_lock();
685  //This node only deals with fragmented ip4
686  map_ip4_reass_t *r = map_ip4_reass_get(ip40->src_address.as_u32, ip40->dst_address.as_u32,
687  ip40->fragment_id, ip40->protocol, &fragments_to_drop);
688  if (PREDICT_FALSE(!r)) {
689  // Could not create a caching entry
690  error0 = MAP_ERROR_FRAGMENT_MEMORY;
691  } else if (PREDICT_TRUE(ip4_get_fragment_offset(ip40))) {
692  // This is a fragment
693  if (r->port >= 0) {
694  // We know the port already
695  port0 = r->port;
696  } else if (map_ip4_reass_add_fragment(r, pi0)) {
697  // Not enough space for caching
698  error0 = MAP_ERROR_FRAGMENT_MEMORY;
699  map_ip4_reass_free(r, &fragments_to_drop);
700  } else {
701  cached = 1;
702  }
703  } else if ((port0 = ip4_get_port(ip40, MAP_SENDER, p0->current_length)) < 0) {
704  // Could not find port from first fragment. Stop reassembling.
705  error0 = MAP_ERROR_BAD_PROTOCOL;
706  port0 = 0;
707  map_ip4_reass_free(r, &fragments_to_drop);
708  } else {
709  // Found port. Remember it and loopback saved fragments
710  r->port = port0;
711  map_ip4_reass_get_fragments(r, &fragments_to_loopback);
712  }
713 
714 #ifdef MAP_IP4_REASS_COUNT_BYTES
715  if (!cached && r) {
716  r->forwarded += clib_host_to_net_u16(ip40->length) - 20;
717  if (!ip4_get_fragment_more(ip40))
718  r->expected_total = ip4_get_fragment_offset(ip40) * 8 + clib_host_to_net_u16(ip40->length) - 20;
719  if(r->forwarded >= r->expected_total)
720  map_ip4_reass_free(r, &fragments_to_drop);
721  }
722 #endif
723 
724  map_ip4_reass_unlock();
725 
726  if(PREDICT_TRUE(error0 == MAP_ERROR_NONE))
727  error0 = ip6_map_sec_check(d0, port0, ip40, ip60) ? MAP_ERROR_NONE : MAP_ERROR_DECAP_SEC_CHECK;
728 
729  if (PREDICT_FALSE(d0->mtu && (clib_host_to_net_u16(ip40->length) > d0->mtu) &&
730  error0 == MAP_ERROR_NONE && !cached)) {
731  vnet_buffer(p0)->ip_frag.header_offset = 0;
732  vnet_buffer(p0)->ip_frag.flags = 0;
733  vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
734  vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
735  next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT;
736  }
737 
738  if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
739  map_ip6_map_ip4_reass_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
740  tr->map_domain_index = map_domain_index0;
741  tr->port = port0;
742  tr->cached = cached;
743  }
744 
745  if (cached) {
746  //Dequeue the packet
747  n_left_to_next++;
748  to_next--;
749  } else {
750  if (error0 == MAP_ERROR_NONE)
751  vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index, map_domain_index0, 1,
752  clib_net_to_host_u16(ip40->length));
753  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
754  p0->error = error_node->errors[error0];
755  vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
756  }
757 
758  //Loopback when we reach the end of the input vector
759  if(n_left_from == 0 && vec_len(fragments_to_loopback)) {
760  from = vlib_frame_vector_args(frame);
761  u32 len = vec_len(fragments_to_loopback);
762  if(len <= VLIB_FRAME_SIZE) {
763  clib_memcpy(from, fragments_to_loopback, sizeof(u32)*len);
764  n_left_from = len;
765  vec_reset_length(fragments_to_loopback);
766  } else {
767  clib_memcpy(from, fragments_to_loopback + (len - VLIB_FRAME_SIZE), sizeof(u32)*VLIB_FRAME_SIZE);
768  n_left_from = VLIB_FRAME_SIZE;
769  _vec_len(fragments_to_loopback) = len - VLIB_FRAME_SIZE;
770  }
771  }
772  }
773  vlib_put_next_frame(vm, node, next_index, n_left_to_next);
774  }
775  map_send_all_to_node(vm, fragments_to_drop, node,
776  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
777  IP6_MAP_IP4_REASS_NEXT_DROP);
778 
779  vec_free(fragments_to_drop);
780  vec_free(fragments_to_loopback);
781  return frame->n_vectors;
782 }
783 
784 /*
785  * ip6_icmp_relay
786  */
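/*
 * Relay ICMPv6 errors raised against the tunnel (destination unreachable,
 * time exceeded, parameter problem, packet too big) to the inner IPv4
 * source as ICMPv4 errors, following RFC 2473, section 8.3.
 */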
787 static uword
788 ip6_map_icmp_relay (vlib_main_t *vm,
789  vlib_node_runtime_t *node,
790  vlib_frame_t *frame)
791 {
792  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
793  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_icmp_relay_node.index);
794  map_main_t *mm = &map_main;
795  u32 cpu_index = os_get_cpu_number();
796  u16 *fragment_ids, *fid;
797 
798  from = vlib_frame_vector_args(frame);
799  n_left_from = frame->n_vectors;
800  next_index = node->cached_next_index;
801 
802  /* Get random fragment IDs for replies. */
803  fid = fragment_ids = clib_random_buffer_get_data (&vm->random_buffer, n_left_from * sizeof (fragment_ids[0]));
804 
805  while (n_left_from > 0) {
806  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
807 
808  /* Single loop */
809  while (n_left_from > 0 && n_left_to_next > 0) {
810  u32 pi0;
811  vlib_buffer_t *p0;
812  u8 error0 = MAP_ERROR_NONE;
813  ip6_header_t *ip60;
814  u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP;
815  u32 mtu;
816 
817  pi0 = to_next[0] = from[0];
818  from += 1;
819  n_left_from -= 1;
820  to_next +=1;
821  n_left_to_next -= 1;
822 
823  p0 = vlib_get_buffer(vm, pi0);
824  ip60 = vlib_buffer_get_current(p0);
825  u16 tlen = clib_net_to_host_u16(ip60->payload_length);
826 
827  /*
828  * In:
829  * IPv6 header (40)
830  * ICMPv6 header (8)
831  * IPv6 header (40)
832  * Original IPv4 header / packet
833  * Out:
834  * New IPv4 header
835  * New ICMP header
836  * Original IPv4 header / packet
837  */
838 
839  /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
840  if (tlen < 76) {
841  error0 = MAP_ERROR_ICMP_RELAY;
842  goto error;
843  }
844 
845  icmp46_header_t *icmp60 = (icmp46_header_t *)(ip60 + 1);
846  ip6_header_t *inner_ip60 = (ip6_header_t *)(icmp60 + 2);
847 
848  if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP) {
849  error0 = MAP_ERROR_ICMP_RELAY;
850  goto error;
851  }
852 
853  ip4_header_t *inner_ip40 = (ip4_header_t *)(inner_ip60 + 1);
854  vlib_buffer_advance(p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
855  ip4_header_t *new_ip40 = vlib_buffer_get_current(p0);
856  icmp46_header_t *new_icmp40 = (icmp46_header_t *)(new_ip40 + 1);
857 
858  /*
859  * Relay according to RFC2473, section 8.3
860  */
861  switch (icmp60->type) {
862  case ICMP6_destination_unreachable:
863  case ICMP6_time_exceeded:
864  case ICMP6_parameter_problem:
865  /* Type 3 - destination unreachable, Code 1 - host unreachable */
866  new_icmp40->type = ICMP4_destination_unreachable;
867  new_icmp40->code = ICMP4_destination_unreachable_destination_unreachable_host;
868  break;
869 
870  case ICMP6_packet_too_big:
871  /* Type 3 - destination unreachable, Code 4 - packet too big */
872  /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
873  mtu = clib_net_to_host_u32(*((u32 *)(icmp60 + 1)));
874 
875  /* Check DF flag */
876  if (!(inner_ip40->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT))) {
877  error0 = MAP_ERROR_ICMP_RELAY;
878  goto error;
879  }
880 
881  new_icmp40->type = ICMP4_destination_unreachable;
882  new_icmp40->code = ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
883  *((u32 *)(new_icmp40 + 1)) = clib_host_to_net_u32(mtu < 1280 ? 1280 : mtu);
884  break;
885 
886  default:
887  error0 = MAP_ERROR_ICMP_RELAY;
888  break;
889  }
890 
891  /*
892  * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
893  */
894  new_ip40->ip_version_and_header_length = 0x45;
895  new_ip40->tos = 0;
896  u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
897  new_ip40->length = clib_host_to_net_u16(nlen);
898  new_ip40->fragment_id = fid[0]; fid++;
899  new_ip40->ttl = 64;
900  new_ip40->protocol = IP_PROTOCOL_ICMP;
901  new_ip40->src_address = mm->icmp4_src_address;
902  new_ip40->dst_address = inner_ip40->src_address;
903  new_ip40->checksum = ip4_header_checksum(new_ip40);
904 
905  new_icmp40->checksum = 0;
906  ip_csum_t sum = ip_incremental_checksum(0, new_icmp40, nlen - 20);
907  new_icmp40->checksum = ~ip_csum_fold(sum);
908 
909  vlib_increment_simple_counter(&mm->icmp_relayed, cpu_index, 0, 1);
910 
911  error:
912  if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
913  map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
914  tr->map_domain_index = 0;
915  tr->port = 0;
916  }
917 
918  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
919  p0->error = error_node->errors[error0];
920  vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
921  }
922  vlib_put_next_frame(vm, node, next_index, n_left_to_next);
923  }
924 
925  return frame->n_vectors;
926 
927 }
928 
929 static char *map_error_strings[] = {
930 #define _(sym,string) string,
931  foreach_map_error
932 #undef _
933 };
934 
935 VLIB_REGISTER_NODE(ip6_map_node) = {
936  .function = ip6_map,
937  .name = "ip6-map",
938  .vector_size = sizeof(u32),
939  .format_trace = format_map_trace,
941 
942  .n_errors = MAP_N_ERROR,
943  .error_strings = map_error_strings,
944 
945  .n_next_nodes = IP6_MAP_N_NEXT,
946  .next_nodes = {
947  [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
948 #ifdef MAP_SKIP_IP6_LOOKUP
949  [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-rewrite-transit",
950 #endif
951  [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass",
952  [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass",
953  [IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
954  [IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay",
955  [IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local",
956  [IP6_MAP_NEXT_DROP] = "error-drop",
957  [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error",
958  },
959 };
960 
961 VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = {
962  .function = ip6_map_ip6_reass,
963  .name = "ip6-map-ip6-reass",
964  .vector_size = sizeof(u32),
965  .format_trace = format_ip6_map_ip6_reass_trace,
967  .n_errors = MAP_N_ERROR,
968  .error_strings = map_error_strings,
969  .n_next_nodes = IP6_MAP_IP6_REASS_N_NEXT,
970  .next_nodes = {
971  [IP6_MAP_IP6_REASS_NEXT_IP6_MAP] = "ip6-map",
972  [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop",
973  },
974 };
975 
976 VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = {
977  .function = ip6_map_ip4_reass,
978  .name = "ip6-map-ip4-reass",
979  .vector_size = sizeof(u32),
980  .format_trace = format_ip6_map_ip4_reass_trace,
982  .n_errors = MAP_N_ERROR,
983  .error_strings = map_error_strings,
984  .n_next_nodes = IP6_MAP_IP4_REASS_N_NEXT,
985  .next_nodes = {
986  [IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
987  [IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
988  [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop",
989  },
990 };
991 
992 VLIB_REGISTER_NODE(ip6_map_icmp_relay_node) = {
993  .function = ip6_map_icmp_relay,
994  .name = "ip6-map-icmp-relay",
995  .vector_size = sizeof(u32),
996  .format_trace = format_map_trace, //FIXME
998  .n_errors = MAP_N_ERROR,
999  .error_strings = map_error_strings,
1000  .n_next_nodes = IP6_ICMP_RELAY_N_NEXT,
1001  .next_nodes = {
1002  [IP6_ICMP_RELAY_NEXT_IP4_LOOKUP] = "ip4-lookup",
1003  [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop",
1004  },
1005 };