FD.io VPP  v20.05.1-6-gf53edbc3b
Vector Packet Processing
ip_frag.c
/*---------------------------------------------------------------------------
 * Copyright (c) 2009-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *---------------------------------------------------------------------------
 */
/*
 * IPv4 and IPv6 fragmentation nodes
 */

#include "ip_frag.h"

#include <vnet/ip/ip.h>

typedef struct
{
  u16 mtu;
  u8 next;
  u16 n_fragments;
  u8 ipv6;
} ip_frag_trace_t;

static u8 *
format_ip_frag_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip_frag_trace_t *t = va_arg (*args, ip_frag_trace_t *);
  s = format (s, "IPv%s mtu: %u fragments: %u next: %d",
              t->ipv6 ? "6" : "4", t->mtu, t->n_fragments, t->next);
  return s;
}

static u32 running_fragment_id;

static void
frag_set_sw_if_index (vlib_buffer_t * to, vlib_buffer_t * from)
{
  vnet_buffer (to)->sw_if_index[VLIB_RX] =
    vnet_buffer (from)->sw_if_index[VLIB_RX];
  vnet_buffer (to)->sw_if_index[VLIB_TX] =
    vnet_buffer (from)->sw_if_index[VLIB_TX];

  /* Copy the adj_index in case a DPO-based node is doing the
   * fragmentation, so that the fragments are sent back to the proper
   * DPO next node and index.
   */
  vnet_buffer (to)->ip.adj_index[VLIB_RX] =
    vnet_buffer (from)->ip.adj_index[VLIB_RX];
  vnet_buffer (to)->ip.adj_index[VLIB_TX] =
    vnet_buffer (from)->ip.adj_index[VLIB_TX];

  /* Copy QoS bits */
  if (PREDICT_TRUE (from->flags & VNET_BUFFER_F_QOS_DATA_VALID))
    {
      vnet_buffer2 (to)->qos = vnet_buffer2 (from)->qos;
      to->flags |= VNET_BUFFER_F_QOS_DATA_VALID;
    }
}

static vlib_buffer_t *
frag_buffer_alloc (vlib_buffer_t * org_b, u32 * bi)
{
  vlib_main_t *vm = vlib_get_main ();
  if (vlib_buffer_alloc (vm, bi, 1) != 1)
    return 0;

  vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
  vlib_buffer_copy_trace_flag (vm, org_b, *bi);

  return b;
}

/*
 * Limitation: follows buffer chains in the packet to be fragmented, but
 * does not generate buffer chains; i.e. a fragment is always contained
 * within a single buffer and limited to the maximum buffer size.
 * from_bi: the buffer's current pointer must point to the IPv4 header.
 */
ip_frag_error_t
ip4_frag_do_fragment (vlib_main_t * vm, u32 from_bi, u16 mtu,
                      u16 l2unfragmentablesize, u32 ** buffer)
{
  vlib_buffer_t *from_b;
  ip4_header_t *ip4;
  u16 len, max, rem, ip_frag_id, ip_frag_offset;
  u8 *org_from_packet, more;

  from_b = vlib_get_buffer (vm, from_bi);
  org_from_packet = vlib_buffer_get_current (from_b);
  ip4 = vlib_buffer_get_current (from_b) + l2unfragmentablesize;

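  /* rem is the L3 payload still to be fragmented; max is the largest
   * per-fragment payload: the MTU (capped at the default buffer data size)
   * minus the IPv4 header, rounded down to a multiple of 8 so fragment
   * offsets stay representable.  For example, an MTU of 1500 gives
   * max = (1500 - 20) & ~7 = 1480. */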
  rem = clib_net_to_host_u16 (ip4->length) - sizeof (ip4_header_t);
  max =
    (clib_min (mtu, vlib_buffer_get_default_data_size (vm)) -
     sizeof (ip4_header_t)) & ~0x7;

  if (rem >
      (vlib_buffer_length_in_chain (vm, from_b) - sizeof (ip4_header_t)))
    {
      return IP_FRAG_ERROR_MALFORMED;
    }

  if (mtu < sizeof (ip4_header_t))
    {
      return IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
    }

  if (ip4->flags_and_fragment_offset &
      clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT))
    {
      return IP_FRAG_ERROR_DONT_FRAGMENT_SET;
    }

  if (ip4_is_fragment (ip4))
    {
      ip_frag_id = ip4->fragment_id;
      ip_frag_offset = ip4_get_fragment_offset (ip4);
      more =
        !(!(ip4->flags_and_fragment_offset &
            clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)));
    }
  else
    {
      ip_frag_id = (++running_fragment_id);
      ip_frag_offset = 0;
      more = 0;
    }

  u8 *from_data = (void *) (ip4 + 1);
  vlib_buffer_t *org_from_b = from_b;
  u16 fo = 0;
  u16 left_in_from_buffer =
    from_b->current_length - (l2unfragmentablesize + sizeof (ip4_header_t));
  u16 ptr = 0;

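  /* fo tracks the running fragment offset in bytes; ptr and
   * left_in_from_buffer walk the source buffer (chain) as payload is
   * copied out into the fragments below. */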
  /* Do the actual fragmentation */
  while (rem)
    {
      u32 to_bi;
      vlib_buffer_t *to_b;
      ip4_header_t *to_ip4;
      u8 *to_data;

      len = (rem > max ? max : rem);
      if (len != rem)           /* Last fragment does not need to be divisible by 8 */
        len &= ~0x7;
      if ((to_b = frag_buffer_alloc (org_from_b, &to_bi)) == 0)
        {
          return IP_FRAG_ERROR_MEMORY;
        }
      vec_add1 (*buffer, to_bi);
      frag_set_sw_if_index (to_b, org_from_b);

      /* Copy ip4 header */
      to_data = vlib_buffer_get_current (to_b);
      clib_memcpy_fast (to_data, org_from_packet,
                        l2unfragmentablesize + sizeof (ip4_header_t));
      to_ip4 = (ip4_header_t *) (to_data + l2unfragmentablesize);
      to_data = (void *) (to_ip4 + 1);
      vnet_buffer (to_b)->l3_hdr_offset = to_b->current_data;
      vlib_buffer_copy_trace_flag (vm, from_b, to_bi);
      to_b->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

      if (from_b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID)
        {
          vnet_buffer (to_b)->l4_hdr_offset =
            (vnet_buffer (to_b)->l3_hdr_offset +
             (vnet_buffer (from_b)->l4_hdr_offset -
              vnet_buffer (from_b)->l3_hdr_offset));
          to_b->flags |= VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
        }

      /* Spin through from buffers filling up the to buffer */
      u16 left_in_to_buffer = len, to_ptr = 0;
      while (1)
        {
          u16 bytes_to_copy;

          /* Figure out how many bytes we can safely copy */
          bytes_to_copy = left_in_to_buffer <= left_in_from_buffer ?
            left_in_to_buffer : left_in_from_buffer;
          clib_memcpy_fast (to_data + to_ptr, from_data + ptr, bytes_to_copy);
          left_in_to_buffer -= bytes_to_copy;
          ptr += bytes_to_copy;
          left_in_from_buffer -= bytes_to_copy;
          if (left_in_to_buffer == 0)
            break;

          ASSERT (left_in_from_buffer <= 0);
          /* Move buffer */
          if (!(from_b->flags & VLIB_BUFFER_NEXT_PRESENT))
            {
              return IP_FRAG_ERROR_MALFORMED;
            }
          from_b = vlib_get_buffer (vm, from_b->next_buffer);
          from_data = (u8 *) vlib_buffer_get_current (from_b);
          ptr = 0;
          left_in_from_buffer = from_b->current_length;
          to_ptr += bytes_to_copy;
        }

      to_b->flags |= VNET_BUFFER_F_IS_IP4;
      to_b->current_length =
        len + sizeof (ip4_header_t) + l2unfragmentablesize;

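      /* The IPv4 fragment offset field counts 8-byte units, hence fo >> 3.
       * Bit 13 of flags_and_fragment_offset is MF (more fragments): it is
       * set on every fragment except the last, or on all fragments when
       * the original packet was itself a non-final fragment (more). */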
      to_ip4->fragment_id = ip_frag_id;
      to_ip4->flags_and_fragment_offset =
        clib_host_to_net_u16 ((fo >> 3) + ip_frag_offset);
      to_ip4->flags_and_fragment_offset |=
        clib_host_to_net_u16 (((len != rem) || more) << 13);
      to_ip4->length = clib_host_to_net_u16 (len + sizeof (ip4_header_t));
      to_ip4->checksum = ip4_header_checksum (to_ip4);

      /* we've just done the IP checksum .. */
      to_b->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;

      rem -= len;
      fo += len;
    }

  return IP_FRAG_ERROR_NONE;
}
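
/*
 * Illustrative usage (a sketch, not part of the original file): callers
 * outside the graph nodes below can invoke ip4_frag_do_fragment directly.
 * 'vm', 'bi0' and the 1500-byte MTU are assumed to come from the caller;
 * error handling is reduced to a single check.
 *
 *   u32 *frag_bis = 0;
 *   ip_frag_error_t err = ip4_frag_do_fragment (vm, bi0, 1500, 0, &frag_bis);
 *   if (err == IP_FRAG_ERROR_NONE)
 *     {
 *       // enqueue the buffer indices in frag_bis, then free the original
 *       vlib_buffer_free_one (vm, bi0);
 *     }
 *   vec_free (frag_bis);
 */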

void
ip_frag_set_vnet_buffer (vlib_buffer_t * b, u16 mtu, u8 next_index, u8 flags)
{
  vnet_buffer (b)->ip_frag.mtu = mtu;
  vnet_buffer (b)->ip_frag.next_index = next_index;
  vnet_buffer (b)->ip_frag.flags = flags;
}
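
/*
 * Typical use (sketch only): a node that wants a too-large packet
 * fragmented fills in the per-buffer parameters and hands the packet to
 * the "ip4-frag" / "ip6-frag" node; next_index selects where the
 * resulting fragments are enqueued afterwards.  'b0' and 'adj_mtu' are
 * placeholders for the caller's buffer and MTU.
 *
 *   ip_frag_set_vnet_buffer (b0, adj_mtu, IP_FRAG_NEXT_IP4_LOOKUP, 0);
 */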


static inline uword
frag_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame, u32 node_index, bool is_ip6)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, node_index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 frag_sent = 0, small_packets = 0;
  u32 *buffer = 0;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0, *frag_from, frag_left;
          vlib_buffer_t *p0;
          ip_frag_error_t error0;
          int next0;

          /*
           * Note: The packet is not enqueued now. It is instead put
           * in a vector where other fragments will be put as well.
           */
          pi0 = from[0];
          from += 1;
          n_left_from -= 1;

          p0 = vlib_get_buffer (vm, pi0);
          u16 mtu = vnet_buffer (p0)->ip_frag.mtu;
          if (is_ip6)
            error0 = ip6_frag_do_fragment (vm, pi0, mtu, 0, &buffer);
          else
            error0 = ip4_frag_do_fragment (vm, pi0, mtu, 0, &buffer);

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_frag_trace_t *tr =
                vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->mtu = mtu;
              tr->ipv6 = is_ip6 ? 1 : 0;
              tr->n_fragments = vec_len (buffer);
              tr->next = vnet_buffer (p0)->ip_frag.next_index;
            }

          if (!is_ip6 && error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET)
            {
              icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
                                           ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
                                           vnet_buffer (p0)->ip_frag.mtu);
              next0 = IP_FRAG_NEXT_ICMP_ERROR;
            }
          else
            {
              next0 = (error0 == IP_FRAG_ERROR_NONE ?
                       vnet_buffer (p0)->ip_frag.next_index :
                       IP_FRAG_NEXT_DROP);
            }

          if (error0 == IP_FRAG_ERROR_NONE)
            {
              /* Free original buffer chain */
              frag_sent += vec_len (buffer);
              small_packets += (vec_len (buffer) == 1);
              vlib_buffer_free_one (vm, pi0);   /* Free original packet */
            }
          else
            {
              vlib_error_count (vm, node_index, error0, 1);
              vec_add1 (buffer, pi0);   /* Get rid of the original buffer */
            }

          /* Send fragments that were added in the frame */
          frag_from = buffer;
          frag_left = vec_len (buffer);

          while (frag_left > 0)
            {
              while (frag_left > 0 && n_left_to_next > 0)
                {
                  u32 i;
                  i = to_next[0] = frag_from[0];
                  frag_from += 1;
                  frag_left -= 1;
                  to_next += 1;
                  n_left_to_next -= 1;

                  vlib_get_buffer (vm, i)->error = error_node->errors[error0];
                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                   to_next, n_left_to_next, i,
                                                   next0);
                }
              vlib_put_next_frame (vm, node, next_index, n_left_to_next);
              vlib_get_next_frame (vm, node, next_index, to_next,
                                   n_left_to_next);
            }
          vec_reset_length (buffer);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);

  vlib_node_increment_counter (vm, node_index,
                               IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
  vlib_node_increment_counter (vm, node_index,
                               IP_FRAG_ERROR_SMALL_PACKET, small_packets);

  return frame->n_vectors;
}


static uword
ip4_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return frag_node_inline (vm, node, frame, ip4_frag_node.index,
                           0 /* is_ip6 */ );
}

static uword
ip6_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return frag_node_inline (vm, node, frame, ip6_frag_node.index,
                           1 /* is_ip6 */ );
}

/*
 * Fragments the packet given in from_bi. Fragments are returned in the
 * buffer vector. The caller must ensure the original packet is freed.
 * from_bi: the buffer's current pointer must point to the IPv6 header.
 */
ip_frag_error_t
ip6_frag_do_fragment (vlib_main_t * vm, u32 from_bi, u16 mtu,
                      u16 l2unfragmentablesize, u32 ** buffer)
{
  vlib_buffer_t *from_b;
  ip6_header_t *ip6;
  u16 len, max, rem, ip_frag_id;
  u8 *org_from_packet;

  from_b = vlib_get_buffer (vm, from_bi);
  org_from_packet = vlib_buffer_get_current (from_b);
  ip6 = vlib_buffer_get_current (from_b) + l2unfragmentablesize;

  rem = clib_net_to_host_u16 (ip6->payload_length);
  max = (mtu - sizeof (ip6_header_t) - sizeof (ip6_frag_hdr_t)) & ~0x7;	// TODO: Is max correct??
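  /* Each fragment carries the 40-byte IPv6 header plus an 8-byte fragment
   * header, so e.g. an MTU of 1500 leaves max = (1500 - 40 - 8) & ~7 = 1448
   * bytes of payload per fragment. */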

  if (rem >
      (vlib_buffer_length_in_chain (vm, from_b) - sizeof (ip6_header_t)))
    {
      return IP_FRAG_ERROR_MALFORMED;
    }

  /* TODO: Look through header chain for fragmentation header */
  if (ip6->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
    {
      return IP_FRAG_ERROR_MALFORMED;
    }

  u8 *from_data = (void *) (ip6 + 1);
  vlib_buffer_t *org_from_b = from_b;
  u16 fo = 0;
  u16 left_in_from_buffer =
    from_b->current_length - (l2unfragmentablesize + sizeof (ip6_header_t));
  u16 ptr = 0;

  ip_frag_id = ++running_fragment_id;	// Fix

  /* Do the actual fragmentation */
  while (rem)
    {
      u32 to_bi;
      vlib_buffer_t *to_b;
      ip6_header_t *to_ip6;
      ip6_frag_hdr_t *to_frag_hdr;
      u8 *to_data;

      len =
        (rem >
         (mtu - sizeof (ip6_header_t) - sizeof (ip6_frag_hdr_t)) ? max : rem);
      if (len != rem)           /* Last fragment does not need to be divisible by 8 */
        len &= ~0x7;
      if ((to_b = frag_buffer_alloc (org_from_b, &to_bi)) == 0)
        {
          return IP_FRAG_ERROR_MEMORY;
        }
      vec_add1 (*buffer, to_bi);
      frag_set_sw_if_index (to_b, org_from_b);

      /* Copy ip6 header */
      clib_memcpy_fast (to_b->data, org_from_packet,
                        l2unfragmentablesize + sizeof (ip6_header_t));
      to_ip6 = vlib_buffer_get_current (to_b);
      to_frag_hdr = (ip6_frag_hdr_t *) (to_ip6 + 1);
      to_data = (void *) (to_frag_hdr + 1);

      vnet_buffer (to_b)->l3_hdr_offset = to_b->current_data;
      to_b->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

      if (from_b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID)
        {
          vnet_buffer (to_b)->l4_hdr_offset =
            (vnet_buffer (to_b)->l3_hdr_offset +
             (vnet_buffer (from_b)->l4_hdr_offset -
              vnet_buffer (from_b)->l3_hdr_offset));
          to_b->flags |= VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
        }
      to_b->flags |= VNET_BUFFER_F_IS_IP6;

      /* Spin through from buffers filling up the to buffer */
      u16 left_in_to_buffer = len, to_ptr = 0;
      while (1)
        {
          u16 bytes_to_copy;

          /* Figure out how many bytes we can safely copy */
          bytes_to_copy = left_in_to_buffer <= left_in_from_buffer ?
            left_in_to_buffer : left_in_from_buffer;
          clib_memcpy_fast (to_data + to_ptr, from_data + ptr, bytes_to_copy);
          left_in_to_buffer -= bytes_to_copy;
          ptr += bytes_to_copy;
          left_in_from_buffer -= bytes_to_copy;
          if (left_in_to_buffer == 0)
            break;

          ASSERT (left_in_from_buffer <= 0);
          /* Move buffer */
          if (!(from_b->flags & VLIB_BUFFER_NEXT_PRESENT))
            {
              return IP_FRAG_ERROR_MALFORMED;
            }
          from_b = vlib_get_buffer (vm, from_b->next_buffer);
          from_data = (u8 *) vlib_buffer_get_current (from_b);
          ptr = 0;
          left_in_from_buffer = from_b->current_length;
          to_ptr += bytes_to_copy;
        }

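      /* Finish this fragment: the lengths cover the payload plus the
       * 8-byte fragment header; the original next-header value moves into
       * the fragment header while the IPv6 header now chains to it.  The
       * offset is again in 8-byte units and the M bit is set on all but
       * the last fragment. */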
      to_b->current_length =
        len + sizeof (ip6_header_t) + sizeof (ip6_frag_hdr_t);
      to_ip6->payload_length =
        clib_host_to_net_u16 (len + sizeof (ip6_frag_hdr_t));
      to_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
      to_frag_hdr->fragment_offset_and_more =
        ip6_frag_hdr_offset_and_more ((fo >> 3), len != rem);
      to_frag_hdr->identification = ip_frag_id;
      to_frag_hdr->next_hdr = ip6->protocol;
      to_frag_hdr->rsv = 0;

      rem -= len;
      fo += len;
    }

  return IP_FRAG_ERROR_NONE;
}
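
/*
 * Usage mirrors ip4_frag_do_fragment above (sketch only; 'bi0' and
 * 'frag_bis' are placeholders): pass a buffer whose current pointer is at
 * the IPv6 header, collect the fragment buffer indices from the vector,
 * and free the original buffer yourself.
 *
 *   ip_frag_error_t err = ip6_frag_do_fragment (vm, bi0, 1280, 0, &frag_bis);
 */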

static char *ip4_frag_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_frag_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_frag_node) = {
  .function = ip4_frag,
  .name = IP4_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP_FRAG_N_NEXT,
  .next_nodes = {
    [IP_FRAG_NEXT_IP_REWRITE] = "ip4-rewrite",
    [IP_FRAG_NEXT_IP_REWRITE_MIDCHAIN] = "ip4-midchain",
    [IP_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP_FRAG_NEXT_ICMP_ERROR] = "ip4-icmp-error",
    [IP_FRAG_NEXT_DROP] = "ip4-drop"
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_frag_node) = {
  .function = ip6_frag,
  .name = IP6_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP_FRAG_N_NEXT,
  .next_nodes = {
    [IP_FRAG_NEXT_IP_REWRITE] = "ip6-rewrite",
    [IP_FRAG_NEXT_IP_REWRITE_MIDCHAIN] = "ip6-midchain",
    [IP_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP_FRAG_NEXT_ICMP_ERROR] = "error-drop",
    [IP_FRAG_NEXT_DROP] = "ip6-drop"
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */