FD.io VPP  v18.10-34-gcce845e
Vector Packet Processing
ip_frag.c
/*---------------------------------------------------------------------------
 * Copyright (c) 2009-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *---------------------------------------------------------------------------
 */
/*
 * IPv4 Fragmentation Node
 */

#include "ip_frag.h"

#include <vnet/ip/ip.h>

typedef struct
{
  u8 ipv6;
  u16 mtu;
  u8 next;
  u16 n_fragments;
} ip_frag_trace_t;

static u8 *
format_ip_frag_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip_frag_trace_t *t = va_arg (*args, ip_frag_trace_t *);
  s = format (s, "IPv%s mtu: %u fragments: %u",
              t->ipv6 ? "6" : "4", t->mtu, t->n_fragments);
  return s;
}

static u32 running_fragment_id;

static void
frag_set_sw_if_index (vlib_buffer_t * to, vlib_buffer_t * from)
{
  vnet_buffer (to)->sw_if_index[VLIB_RX] =
    vnet_buffer (from)->sw_if_index[VLIB_RX];
  vnet_buffer (to)->sw_if_index[VLIB_TX] =
    vnet_buffer (from)->sw_if_index[VLIB_TX];

  /* Copy adj_index in case a DPO-based node is sending the packet for
   * fragmentation, so that the fragments are sent back to the proper
   * DPO next node and index.
   */
  vnet_buffer (to)->ip.adj_index[VLIB_RX] =
    vnet_buffer (from)->ip.adj_index[VLIB_RX];
  vnet_buffer (to)->ip.adj_index[VLIB_TX] =
    vnet_buffer (from)->ip.adj_index[VLIB_TX];
}

static vlib_buffer_t *
frag_buffer_alloc (vlib_buffer_t * org_b, u32 * bi)
{
  vlib_main_t *vm = vlib_get_main ();
  if (vlib_buffer_alloc (vm, bi, 1) != 1)
    return 0;

  vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
  vlib_buffer_free_list_t *fl =
    vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
  vlib_buffer_init_for_free_list (b, fl);
  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
  vlib_buffer_copy_trace_flag (vm, org_b, *bi);

  return b;
}

/*
 * Limitation: This code follows buffer chains in the packet to be
 * fragmented, but it does not generate buffer chains. I.e. a fragment is
 * always contained within a single buffer and limited to the max buffer
 * size.
 */
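/*
 * Worked example of the size arithmetic below, assuming an MTU of 1500
 * and a VLIB_BUFFER_DATA_SIZE of at least 1500 bytes: the payload per
 * fragment is (1500 - 20) & ~0x7 = 1480 octets, rounded down to a
 * multiple of 8 because fragment offsets are expressed in 8-octet units.
 * A 4000-byte payload is then emitted as fragments of 1480, 1480 and
 * 1040 bytes at offsets 0, 185 and 370 (in 8-octet units).
 */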
void
ip4_frag_do_fragment (vlib_main_t * vm, u32 from_bi, u32 ** buffer,
                      ip_frag_error_t * error)
{
  vlib_buffer_t *from_b;
  ip4_header_t *ip4;
  u16 mtu, len, max, rem, ip_frag_id, ip_frag_offset;
  u8 *org_from_packet, more;

  from_b = vlib_get_buffer (vm, from_bi);
  mtu = vnet_buffer (from_b)->ip_frag.mtu;
  org_from_packet = vlib_buffer_get_current (from_b);
  ip4 = (ip4_header_t *) vlib_buffer_get_current (from_b);

  rem = clib_net_to_host_u16 (ip4->length) - sizeof (ip4_header_t);
  max =
    (clib_min (mtu, VLIB_BUFFER_DATA_SIZE) - sizeof (ip4_header_t)) & ~0x7;

  if (rem >
      (vlib_buffer_length_in_chain (vm, from_b) - sizeof (ip4_header_t)))
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  if (mtu < sizeof (ip4_header_t))
    {
      *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
      return;
    }

  if (ip4->flags_and_fragment_offset &
      clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT))
    {
      *error = IP_FRAG_ERROR_DONT_FRAGMENT_SET;
      return;
    }

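  /* If the packet is itself already a fragment, keep its fragment id,
   * start from its existing offset and preserve the MF bit; otherwise
   * allocate a fresh fragment id and start at offset 0. */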
  if (ip4_is_fragment (ip4))
    {
      ip_frag_id = ip4->fragment_id;
      ip_frag_offset = ip4_get_fragment_offset (ip4);
      more =
        !(!(ip4->flags_and_fragment_offset &
            clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)));
    }
  else
    {
      ip_frag_id = (++running_fragment_id);
      ip_frag_offset = 0;
      more = 0;
    }

  u8 *from_data = (void *) (ip4 + 1);
  vlib_buffer_t *org_from_b = from_b;
  u16 fo = 0;
  u16 left_in_from_buffer = from_b->current_length - sizeof (ip4_header_t);
  u16 ptr = 0;

  /* Do the actual fragmentation */
  while (rem)
    {
      u32 to_bi;
      vlib_buffer_t *to_b;
      ip4_header_t *to_ip4;
      u8 *to_data;

      len = (rem > max ? max : rem);
      if (len != rem)   /* Last fragment does not need to be divisible by 8 */
        len &= ~0x7;
      if ((to_b = frag_buffer_alloc (org_from_b, &to_bi)) == 0)
        {
          *error = IP_FRAG_ERROR_MEMORY;
          return;
        }
      vec_add1 (*buffer, to_bi);
      frag_set_sw_if_index (to_b, org_from_b);

      /* Copy ip4 header */
      clib_memcpy (to_b->data, org_from_packet, sizeof (ip4_header_t));
      to_ip4 = vlib_buffer_get_current (to_b);
      to_data = (void *) (to_ip4 + 1);

      /* Spin through from buffers filling up the to buffer */
      u16 left_in_to_buffer = len, to_ptr = 0;
      while (1)
        {
          u16 bytes_to_copy;

          /* Figure out how many bytes we can safely copy */
          bytes_to_copy = left_in_to_buffer <= left_in_from_buffer ?
            left_in_to_buffer : left_in_from_buffer;
          clib_memcpy (to_data + to_ptr, from_data + ptr, bytes_to_copy);
          left_in_to_buffer -= bytes_to_copy;
          ptr += bytes_to_copy;
          left_in_from_buffer -= bytes_to_copy;
          if (left_in_to_buffer == 0)
            break;

          ASSERT (left_in_from_buffer <= 0);
          /* Move buffer */
          if (!(from_b->flags & VLIB_BUFFER_NEXT_PRESENT))
            {
              *error = IP_FRAG_ERROR_MALFORMED;
              return;
            }
          from_b = vlib_get_buffer (vm, from_b->next_buffer);
          from_data = (u8 *) vlib_buffer_get_current (from_b);
          ptr = 0;
          left_in_from_buffer = from_b->current_length;
          to_ptr += bytes_to_copy;
        }

      to_b->current_length = len + sizeof (ip4_header_t);

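      /* Fill in the IPv4 fragmentation fields: the offset is carried in
       * 8-octet units, and the "more fragments" bit (bit 13 of the
       * flags/offset field) is set on every fragment except the last,
       * or on all fragments when the original packet already had MF set. */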
      to_ip4->fragment_id = ip_frag_id;
      to_ip4->flags_and_fragment_offset =
        clib_host_to_net_u16 ((fo >> 3) + ip_frag_offset);
      to_ip4->flags_and_fragment_offset |=
        clib_host_to_net_u16 (((len != rem) || more) << 13);
      to_ip4->length = clib_host_to_net_u16 (len + sizeof (ip4_header_t));
      to_ip4->checksum = ip4_header_checksum (to_ip4);

      if (vnet_buffer (org_from_b)->ip_frag.flags & IP_FRAG_FLAG_IP4_HEADER)
        {
          /* Encapsulating ipv4 header */
          ip4_header_t *encap_header4 =
            (ip4_header_t *) vlib_buffer_get_current (to_b);
          encap_header4->length = clib_host_to_net_u16 (to_b->current_length);
          encap_header4->checksum = ip4_header_checksum (encap_header4);
        }
      else if (vnet_buffer (org_from_b)->
               ip_frag.flags & IP_FRAG_FLAG_IP6_HEADER)
        {
          /* Encapsulating ipv6 header */
          ip6_header_t *encap_header6 =
            (ip6_header_t *) vlib_buffer_get_current (to_b);
          encap_header6->payload_length =
            clib_host_to_net_u16 (to_b->current_length -
                                  sizeof (*encap_header6));
        }
      rem -= len;
      fo += len;
    }
}

void
ip_frag_set_vnet_buffer (vlib_buffer_t * b, u16 mtu, u8 next_index, u8 flags)
{
  vnet_buffer (b)->ip_frag.mtu = mtu;
  vnet_buffer (b)->ip_frag.next_index = next_index;
  vnet_buffer (b)->ip_frag.flags = flags;
}
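
/*
 * Hypothetical usage sketch (buffer b0 and the adj_mtu value are
 * illustrative, not taken from this file): a caller that wants a packet
 * fragmented stores the target MTU, the next node and the flags in the
 * buffer metadata before handing the buffer to the ip4-frag node, e.g.
 *
 *   ip_frag_set_vnet_buffer (b0, adj_mtu, IP4_FRAG_NEXT_IP4_LOOKUP, 0);
 *
 * frag_node_inline () then reads these fields back via
 * vnet_buffer (b)->ip_frag and enqueues the resulting fragments to the
 * requested next index.
 */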

static inline uword
frag_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame, u32 node_index, bool is_ip6)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, node_index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 frag_sent = 0, small_packets = 0;
  u32 *buffer = 0;

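  /* "buffer" accumulates the buffer indices of the fragments generated
   * for each input packet; it is reset after every packet and reused
   * across the whole frame to avoid repeated allocations. */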
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0, *frag_from, frag_left;
          vlib_buffer_t *p0;
          ip_frag_error_t error0;
          int next0;

          /*
           * Note: The packet is not enqueued now. It is instead put
           * in a vector where other fragments will be put as well.
           */
          pi0 = from[0];
          from += 1;
          n_left_from -= 1;
          error0 = IP_FRAG_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          if (is_ip6)
            ip6_frag_do_fragment (vm, pi0, &buffer, &error0);
          else
            ip4_frag_do_fragment (vm, pi0, &buffer, &error0);

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_frag_trace_t *tr =
                vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->mtu = vnet_buffer (p0)->ip_frag.mtu;
              tr->ipv6 = is_ip6 ? 1 : 0;
              tr->n_fragments = vec_len (buffer);
              tr->next = vnet_buffer (p0)->ip_frag.next_index;
            }

          if (!is_ip6 && error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET)
            {
              icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
                                           ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
                                           vnet_buffer (p0)->ip_frag.mtu);
              next0 = IP4_FRAG_NEXT_ICMP_ERROR;
            }
          else
            {
              if (is_ip6)
                next0 = (error0 == IP_FRAG_ERROR_NONE) ?
                  vnet_buffer (p0)->ip_frag.next_index : IP6_FRAG_NEXT_DROP;
              else
                next0 = (error0 == IP_FRAG_ERROR_NONE) ?
                  vnet_buffer (p0)->ip_frag.next_index : IP4_FRAG_NEXT_DROP;
            }

          if (error0 == IP_FRAG_ERROR_NONE)
            {
              /* Free original buffer chain */
              frag_sent += vec_len (buffer);
              small_packets += (vec_len (buffer) == 1);
              vlib_buffer_free_one (vm, pi0);   /* Free original packet */
            }
          else
            {
              vlib_error_count (vm, node_index, error0, 1);
              vec_add1 (buffer, pi0);   /* Get rid of the original buffer */
            }

          /* Send fragments that were added in the frame */
          frag_from = buffer;
          frag_left = vec_len (buffer);

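          /* The loops below drain the fragment vector into the current
           * frame; when the frame fills up it is handed off with
           * vlib_put_next_frame () and a fresh one is requested with
           * vlib_get_next_frame () before continuing. */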
          while (frag_left > 0)
            {
              while (frag_left > 0 && n_left_to_next > 0)
                {
                  u32 i;
                  i = to_next[0] = frag_from[0];
                  frag_from += 1;
                  frag_left -= 1;
                  to_next += 1;
                  n_left_to_next -= 1;

                  vlib_get_buffer (vm, i)->error = error_node->errors[error0];
                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                   to_next, n_left_to_next, i,
                                                   next0);
                }
              vlib_put_next_frame (vm, node, next_index, n_left_to_next);
              vlib_get_next_frame (vm, node, next_index, to_next,
                                   n_left_to_next);
            }
          vec_reset_length (buffer);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);

  vlib_node_increment_counter (vm, node_index,
                               IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
  vlib_node_increment_counter (vm, node_index,
                               IP_FRAG_ERROR_SMALL_PACKET, small_packets);

  return frame->n_vectors;
}

static uword
ip4_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return frag_node_inline (vm, node, frame, ip4_frag_node.index,
                           0 /* is_ip6 */ );
}

static uword
ip6_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return frag_node_inline (vm, node, frame, ip6_frag_node.index,
                           1 /* is_ip6 */ );
}

/*
 * Fragments the packet given in from_bi. Fragments are returned in the
 * buffer vector. Caller must ensure the original packet is freed.
 */
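/*
 * Worked example of the size arithmetic below, assuming an MTU of 1500:
 * each fragment carries the 40-byte IPv6 header plus an 8-byte fragment
 * extension header, so the payload per fragment is
 * (1500 - 40 - 8) & ~0x7 = 1448 octets, rounded down to a multiple of 8
 * because fragment offsets are expressed in 8-octet units.
 */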
void
ip6_frag_do_fragment (vlib_main_t * vm, u32 from_bi, u32 ** buffer,
                      ip_frag_error_t * error)
{
  vlib_buffer_t *from_b;
  ip6_header_t *ip6;
  u16 mtu, len, max, rem, ip_frag_id;

  from_b = vlib_get_buffer (vm, from_bi);
  mtu = vnet_buffer (from_b)->ip_frag.mtu;
  ip6 = (ip6_header_t *) vlib_buffer_get_current (from_b);

  rem = clib_net_to_host_u16 (ip6->payload_length);
  max = (mtu - sizeof (ip6_header_t) - sizeof (ip6_frag_hdr_t)) & ~0x7;        // TODO: Is max correct??

  if (rem >
      (vlib_buffer_length_in_chain (vm, from_b) - sizeof (ip6_header_t)))
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  /* TODO: Look through header chain for fragmentation header */
  if (ip6->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  u8 *from_data = (void *) (ip6 + 1);
  vlib_buffer_t *org_from_b = from_b;
  u16 fo = 0;
  u16 left_in_from_buffer = from_b->current_length - sizeof (ip6_header_t);
  u16 ptr = 0;

  ip_frag_id = ++running_fragment_id;   // Fix

  /* Do the actual fragmentation */
  while (rem)
    {
      u32 to_bi;
      vlib_buffer_t *to_b;
      ip6_header_t *to_ip6;
      ip6_frag_hdr_t *to_frag_hdr;
      u8 *to_data;

      len =
        (rem >
         (mtu - sizeof (ip6_header_t) - sizeof (ip6_frag_hdr_t)) ? max : rem);
      if (len != rem)   /* Last fragment does not need to be divisible by 8 */
        len &= ~0x7;
      if ((to_b = frag_buffer_alloc (org_from_b, &to_bi)) == 0)
        {
          *error = IP_FRAG_ERROR_MEMORY;
          return;
        }
      vec_add1 (*buffer, to_bi);
      frag_set_sw_if_index (to_b, org_from_b);

      /* Copy ip6 header */
      clib_memcpy (to_b->data, ip6, sizeof (ip6_header_t));
      to_ip6 = vlib_buffer_get_current (to_b);
      to_frag_hdr = (ip6_frag_hdr_t *) (to_ip6 + 1);
      to_data = (void *) (to_frag_hdr + 1);

      /* Spin through from buffers filling up the to buffer */
      u16 left_in_to_buffer = len, to_ptr = 0;
      while (1)
        {
          u16 bytes_to_copy;

          /* Figure out how many bytes we can safely copy */
          bytes_to_copy = left_in_to_buffer <= left_in_from_buffer ?
            left_in_to_buffer : left_in_from_buffer;
          clib_memcpy (to_data + to_ptr, from_data + ptr, bytes_to_copy);
          left_in_to_buffer -= bytes_to_copy;
          ptr += bytes_to_copy;
          left_in_from_buffer -= bytes_to_copy;
          if (left_in_to_buffer == 0)
            break;

          ASSERT (left_in_from_buffer <= 0);
          /* Move buffer */
          if (!(from_b->flags & VLIB_BUFFER_NEXT_PRESENT))
            {
              *error = IP_FRAG_ERROR_MALFORMED;
              return;
            }
          from_b = vlib_get_buffer (vm, from_b->next_buffer);
          from_data = (u8 *) vlib_buffer_get_current (from_b);
          ptr = 0;
          left_in_from_buffer = from_b->current_length;
          to_ptr += bytes_to_copy;
        }

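      /* Fill in the IPv6 fragment extension header: it takes over the
       * original next-header value, carries the offset in 8-octet units,
       * and sets the M bit on every fragment except the last. The fixed
       * IPv6 header's payload length covers the fragment data plus the
       * 8-byte extension header, and its next header becomes
       * IP_PROTOCOL_IPV6_FRAGMENTATION (44). */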
      to_b->current_length =
        len + sizeof (ip6_header_t) + sizeof (ip6_frag_hdr_t);
      to_ip6->payload_length =
        clib_host_to_net_u16 (len + sizeof (ip6_frag_hdr_t));
      to_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
      to_frag_hdr->fragment_offset_and_more =
        ip6_frag_hdr_offset_and_more ((fo >> 3), len != rem);
      to_frag_hdr->identification = ip_frag_id;
      to_frag_hdr->next_hdr = ip6->protocol;
      to_frag_hdr->rsv = 0;

      rem -= len;
      fo += len;
    }
}

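/* The error strings below are expanded from the foreach_ip_frag_error
 * list in ip_frag.h; the same table is shared by the IPv4 and IPv6
 * fragmentation nodes. */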
static char *ip4_frag_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_frag_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_frag_node) = {
  .function = ip4_frag,
  .name = IP4_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP4_FRAG_N_NEXT,
  .next_nodes = {
    [IP4_FRAG_NEXT_IP4_REWRITE] = "ip4-rewrite",
    [IP4_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP4_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP4_FRAG_NEXT_ICMP_ERROR] = "ip4-icmp-error",
    [IP4_FRAG_NEXT_DROP] = "ip4-drop"
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_frag_node) = {
  .function = ip6_frag,
  .name = IP6_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP6_FRAG_N_NEXT,
  .next_nodes = {
    [IP6_FRAG_NEXT_IP6_REWRITE] = "ip6-rewrite",
    [IP6_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP6_FRAG_NEXT_DROP] = "ip6-drop"
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */