FD.io VPP v19.08.3-2-gbabecb413 — Vector Packet Processing
nat44_classify.c — Doxygen source listing. Note: this extraction embeds the
original line numbers in each line, and lines that were hyperlinked in the
generated documentation are missing from the listing below.
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief Classify for one armed NAT44 (in+out interface)
18  */
19 
20 #include <vlib/vlib.h>
21 #include <vnet/vnet.h>
22 #include <vnet/fib/ip4_fib.h>
23 #include <nat/nat.h>
24 #include <nat/nat_reass.h>
25 #include <nat/nat_inlines.h>
26 
27 #define foreach_nat44_classify_error \
28 _(MAX_REASS, "Maximum reassemblies exceeded") \
29 _(MAX_FRAG, "Maximum fragments per reassembly exceeded") \
30 _(NEXT_IN2OUT, "next in2out") \
31 _(NEXT_OUT2IN, "next out2in") \
32 _(FRAG_CACHED, "fragment cached")
33 
34 typedef enum
35 {
36 #define _(sym,str) NAT44_CLASSIFY_ERROR_##sym,
38 #undef _
41 
42 static char *nat44_classify_error_strings[] = {
43 #define _(sym,string) string,
45 #undef _
46 };
47 
48 typedef enum
49 {
55 
56 typedef struct
57 {
61 
62 static u8 *
63 format_nat44_classify_trace (u8 * s, va_list * args)
64 {
65  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
66  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
67  nat44_classify_trace_t *t = va_arg (*args, nat44_classify_trace_t *);
68  char *next;
69 
70  if (t->cached)
71  s = format (s, "nat44-classify: fragment cached");
72  else
73  {
74  next = t->next_in2out ? "nat44-in2out" : "nat44-out2in";
75  s = format (s, "nat44-classify: next %s", next);
76  }
77 
78  return s;
79 }
80 
81 static inline uword
83  vlib_node_runtime_t * node,
84  vlib_frame_t * frame)
85 {
86  u32 n_left_from, *from, *to_next;
87  nat44_classify_next_t next_index;
88  snat_main_t *sm = &snat_main;
90  u32 thread_index = vm->thread_index;
91  u32 *fragments_to_drop = 0;
92  u32 *fragments_to_loopback = 0;
93  u32 next_in2out = 0, next_out2in = 0, frag_cached = 0;
94 
95  from = vlib_frame_vector_args (frame);
96  n_left_from = frame->n_vectors;
97  next_index = node->cached_next_index;
98 
99  while (n_left_from > 0)
100  {
101  u32 n_left_to_next;
102 
103  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
104 
105  while (n_left_from > 0 && n_left_to_next > 0)
106  {
107  u32 bi0;
108  vlib_buffer_t *b0;
110  ip4_header_t *ip0;
111  snat_address_t *ap;
112  snat_session_key_t m_key0;
113  clib_bihash_kv_8_8_t kv0, value0;
114  udp_header_t *udp0;
115  nat_reass_ip4_t *reass0;
116  u8 cached0 = 0;
117 
118  /* speculatively enqueue b0 to the current next frame */
119  bi0 = from[0];
120  to_next[0] = bi0;
121  from += 1;
122  to_next += 1;
123  n_left_from -= 1;
124  n_left_to_next -= 1;
125 
126  b0 = vlib_get_buffer (vm, bi0);
127  ip0 = vlib_buffer_get_current (b0);
128  udp0 = ip4_next_header (ip0);
129 
130  /* *INDENT-OFF* */
131  vec_foreach (ap, sm->addresses)
132  {
133  if (ip0->dst_address.as_u32 == ap->addr.as_u32)
134  {
136  goto enqueue0;
137  }
138  }
139  /* *INDENT-ON* */
140 
142  {
143  m_key0.addr = ip0->dst_address;
144  m_key0.port = 0;
145  m_key0.protocol = 0;
146  m_key0.fib_index = 0;
147  kv0.key = m_key0.as_u64;
148  /* try to classify the fragment based on IP header alone */
149  if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external,
150  &kv0, &value0))
151  {
152  m = pool_elt_at_index (sm->static_mappings, value0.value);
153  if (m->local_addr.as_u32 != m->external_addr.as_u32)
155  goto enqueue0;
156  }
157  if (!ip4_is_fragment (ip0) || ip4_is_first_fragment (ip0))
158  {
159  /* process leading fragment/whole packet (with L4 header) */
160  m_key0.port = clib_net_to_host_u16 (udp0->dst_port);
161  m_key0.protocol = ip_proto_to_snat_proto (ip0->protocol);
162  kv0.key = m_key0.as_u64;
163  if (!clib_bihash_search_8_8
164  (&sm->static_mapping_by_external, &kv0, &value0))
165  {
166  m =
168  if (m->local_addr.as_u32 != m->external_addr.as_u32)
170  }
171  if (ip4_is_fragment (ip0))
172  {
174  ip0->dst_address,
175  ip0->fragment_id,
176  ip0->protocol,
177  1,
178  &fragments_to_drop);
179  if (PREDICT_FALSE (!reass0))
180  {
181  next0 = NAT44_CLASSIFY_NEXT_DROP;
182  b0->error =
183  node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
184  nat_elog_notice ("maximum reassemblies exceeded");
185  goto enqueue0;
186  }
187  /* save classification for future fragments and set past
188  * fragments to be looped over and reprocessed */
189  if (next0 == NAT44_CLASSIFY_NEXT_OUT2IN)
190  reass0->classify_next =
192  else
193  reass0->classify_next =
195  nat_ip4_reass_get_frags (reass0,
196  &fragments_to_loopback);
197  }
198  }
199  else
200  {
201  /* process non-first fragment */
203  ip0->dst_address,
204  ip0->fragment_id,
205  ip0->protocol,
206  1,
207  &fragments_to_drop);
208  if (PREDICT_FALSE (!reass0))
209  {
210  next0 = NAT44_CLASSIFY_NEXT_DROP;
211  b0->error =
212  node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
213  nat_elog_notice ("maximum reassemblies exceeded");
214  goto enqueue0;
215  }
216  if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NONE)
217  /* first fragment still hasn't arrived */
218  {
220  (thread_index, reass0, bi0, &fragments_to_drop))
221  {
222  b0->error =
223  node->errors[NAT44_CLASSIFY_ERROR_MAX_FRAG];
225  ("maximum fragments per reassembly exceeded");
226  next0 = NAT44_CLASSIFY_NEXT_DROP;
227  goto enqueue0;
228  }
229  cached0 = 1;
230  goto enqueue0;
231  }
232  else if (reass0->classify_next ==
235  else if (reass0->classify_next ==
238  }
239  }
240 
241  enqueue0:
243  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
244  {
246  vlib_add_trace (vm, node, b0, sizeof (*t));
247  t->cached = cached0;
248  if (!cached0)
249  t->next_in2out = next0 == NAT44_CLASSIFY_NEXT_IN2OUT ? 1 : 0;
250  }
251 
252  if (cached0)
253  {
254  n_left_to_next++;
255  to_next--;
256  frag_cached++;
257  }
258  else
259  {
260  next_in2out += next0 == NAT44_CLASSIFY_NEXT_IN2OUT;
261  next_out2in += next0 == NAT44_CLASSIFY_NEXT_OUT2IN;
262 
263  /* verify speculative enqueue, maybe switch current next frame */
264  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
265  to_next, n_left_to_next,
266  bi0, next0);
267  }
268 
269  if (n_left_from == 0 && vec_len (fragments_to_loopback))
270  {
271  from = vlib_frame_vector_args (frame);
272  u32 len = vec_len (fragments_to_loopback);
273  if (len <= VLIB_FRAME_SIZE)
274  {
275  clib_memcpy_fast (from, fragments_to_loopback,
276  sizeof (u32) * len);
277  n_left_from = len;
278  vec_reset_length (fragments_to_loopback);
279  }
280  else
281  {
282  clib_memcpy_fast (from, fragments_to_loopback +
283  (len - VLIB_FRAME_SIZE),
284  sizeof (u32) * VLIB_FRAME_SIZE);
285  n_left_from = VLIB_FRAME_SIZE;
286  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
287  }
288  }
289  }
290 
291  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
292  }
293 
294  nat_send_all_to_node (vm, fragments_to_drop, node, 0,
296 
297  vec_free (fragments_to_drop);
298 
300  NAT44_CLASSIFY_ERROR_NEXT_IN2OUT, next_in2out);
302  NAT44_CLASSIFY_ERROR_NEXT_OUT2IN, next_out2in);
304  NAT44_CLASSIFY_ERROR_FRAG_CACHED, frag_cached);
305 
306  return frame->n_vectors;
307 }
308 
309 static inline uword
311  vlib_node_runtime_t * node,
312  vlib_frame_t * frame)
313 {
314  u32 n_left_from, *from, *to_next;
315  nat44_classify_next_t next_index;
316  snat_main_t *sm = &snat_main;
318  u32 thread_index = vm->thread_index;
319  snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
320  u32 *fragments_to_drop = 0;
321  u32 *fragments_to_loopback = 0;
322  u32 next_in2out = 0, next_out2in = 0, frag_cached = 0;
323  u8 in_loopback = 0;
324 
325  from = vlib_frame_vector_args (frame);
326  n_left_from = frame->n_vectors;
327  next_index = node->cached_next_index;
328 
329  while (n_left_from > 0)
330  {
331  u32 n_left_to_next;
332 
333  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
334 
335  while (n_left_from > 0 && n_left_to_next > 0)
336  {
337  u32 bi0;
338  vlib_buffer_t *b0;
339  u32 next0 =
340  NAT_NEXT_IN2OUT_ED_FAST_PATH, sw_if_index0, rx_fib_index0;
341  ip4_header_t *ip0;
342  snat_address_t *ap;
343  snat_session_key_t m_key0;
344  clib_bihash_kv_8_8_t kv0, value0;
345  clib_bihash_kv_16_8_t ed_kv0, ed_value0;
346  udp_header_t *udp0;
347  nat_reass_ip4_t *reass0;
348  u8 cached0 = 0;
349 
350  /* speculatively enqueue b0 to the current next frame */
351  bi0 = from[0];
352  to_next[0] = bi0;
353  from += 1;
354  to_next += 1;
355  n_left_from -= 1;
356  n_left_to_next -= 1;
357 
358  b0 = vlib_get_buffer (vm, bi0);
359  ip0 = vlib_buffer_get_current (b0);
360  udp0 = ip4_next_header (ip0);
361 
362  if (!in_loopback)
363  {
364  u32 arc_next = 0;
365 
366  vnet_feature_next (&arc_next, b0);
367  nat_buffer_opaque (b0)->arc_next = arc_next;
368  }
369 
370  if (ip0->protocol != IP_PROTOCOL_ICMP)
371  {
372  if (!ip4_is_fragment (ip0) || ip4_is_first_fragment (ip0))
373  {
374  /* process leading fragment/whole packet (with L4 header) */
375  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
376  rx_fib_index0 =
378  sw_if_index0);
379  make_ed_kv (&ed_kv0, &ip0->src_address,
380  &ip0->dst_address, ip0->protocol,
381  rx_fib_index0, udp0->src_port, udp0->dst_port);
382  if (ip4_is_fragment (ip0))
383  {
384  reass0 =
386  ip0->dst_address,
387  ip0->fragment_id,
388  ip0->protocol, 1,
389  &fragments_to_drop);
390  if (PREDICT_FALSE (!reass0))
391  {
392  next0 = NAT_NEXT_DROP;
393  b0->error =
394  node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
395  nat_elog_notice ("maximum reassemblies exceeded");
396  goto enqueue0;
397  }
398  if (!clib_bihash_search_16_8
399  (&tsm->in2out_ed, &ed_kv0, &ed_value0))
400  {
401  /* session exists so classify as IN2OUT,
402  * save this information for future fragments and set
403  * past fragments to be looped over and reprocessed */
404  reass0->sess_index = ed_value0.value;
405  reass0->classify_next =
407  nat_ip4_reass_get_frags (reass0,
408  &fragments_to_loopback);
409  goto enqueue0;
410  }
411  else
412  {
413  /* session doesn't exist so continue in the code,
414  * save this information for future fragments and set
415  * past fragments to be looped over and reprocessed */
416  reass0->flags |=
418  nat_ip4_reass_get_frags (reass0,
419  &fragments_to_loopback);
420  }
421  }
422  else
423  {
424  /* process whole packet */
425  if (!clib_bihash_search_16_8
426  (&tsm->in2out_ed, &ed_kv0, &ed_value0))
427  goto enqueue0;
428  /* session doesn't exist so continue in code */
429  }
430  }
431  else
432  {
433  /* process non-first fragment */
435  ip0->dst_address,
436  ip0->fragment_id,
437  ip0->protocol,
438  1,
439  &fragments_to_drop);
440  if (PREDICT_FALSE (!reass0))
441  {
442  next0 = NAT_NEXT_DROP;
443  b0->error =
444  node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
445  nat_elog_notice ("maximum reassemblies exceeded");
446  goto enqueue0;
447  }
448  /* check if first fragment has arrived */
449  if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NONE
450  && !(reass0->flags &
452  {
453  /* first fragment still hasn't arrived, cache this fragment */
455  (thread_index, reass0, bi0, &fragments_to_drop))
456  {
457  b0->error =
458  node->errors[NAT44_CLASSIFY_ERROR_MAX_FRAG];
460  ("maximum fragments per reassembly exceeded");
461  next0 = NAT_NEXT_DROP;
462  goto enqueue0;
463  }
464  cached0 = 1;
465  goto enqueue0;
466  }
467  if (reass0->classify_next ==
469  goto enqueue0;
470  /* flag NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE is set
471  * so keep the default next0 and continue in code to
472  * potentially find other classification for this packet */
473  }
474  }
475 
476  /* *INDENT-OFF* */
477  vec_foreach (ap, sm->addresses)
478  {
479  if (ip0->dst_address.as_u32 == ap->addr.as_u32)
480  {
482  goto enqueue0;
483  }
484  }
485  /* *INDENT-ON* */
486 
488  {
489  m_key0.addr = ip0->dst_address;
490  m_key0.port = 0;
491  m_key0.protocol = 0;
492  m_key0.fib_index = 0;
493  kv0.key = m_key0.as_u64;
494  /* try to classify the fragment based on IP header alone */
495  if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external,
496  &kv0, &value0))
497  {
498  m = pool_elt_at_index (sm->static_mappings, value0.value);
499  if (m->local_addr.as_u32 != m->external_addr.as_u32)
501  goto enqueue0;
502  }
503  if (!ip4_is_fragment (ip0) || ip4_is_first_fragment (ip0))
504  {
505  /* process leading fragment/whole packet (with L4 header) */
506  m_key0.port = clib_net_to_host_u16 (udp0->dst_port);
507  m_key0.protocol = ip_proto_to_snat_proto (ip0->protocol);
508  kv0.key = m_key0.as_u64;
509  if (!clib_bihash_search_8_8
510  (&sm->static_mapping_by_external, &kv0, &value0))
511  {
512  m =
514  if (m->local_addr.as_u32 != m->external_addr.as_u32)
516  }
517  if (ip4_is_fragment (ip0))
518  {
520  ip0->dst_address,
521  ip0->fragment_id,
522  ip0->protocol,
523  1,
524  &fragments_to_drop);
525  if (PREDICT_FALSE (!reass0))
526  {
527  next0 = NAT_NEXT_DROP;
528  b0->error =
529  node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
530  nat_elog_notice ("maximum reassemblies exceeded");
531  goto enqueue0;
532  }
533  /* save classification for future fragments and set past
534  * fragments to be looped over and reprocessed */
535  if (next0 == NAT_NEXT_OUT2IN_ED_FAST_PATH)
536  reass0->classify_next = NAT_NEXT_OUT2IN_ED_REASS;
537  else
538  reass0->classify_next = NAT_NEXT_IN2OUT_ED_REASS;
539  nat_ip4_reass_get_frags (reass0,
540  &fragments_to_loopback);
541  }
542  }
543  else
544  {
545  /* process non-first fragment */
547  ip0->dst_address,
548  ip0->fragment_id,
549  ip0->protocol,
550  1,
551  &fragments_to_drop);
552  if (PREDICT_FALSE (!reass0))
553  {
554  next0 = NAT_NEXT_DROP;
555  b0->error =
556  node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
557  nat_elog_notice ("maximum reassemblies exceeded");
558  goto enqueue0;
559  }
560  if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NONE)
561  /* first fragment still hasn't arrived */
562  {
564  (thread_index, reass0, bi0, &fragments_to_drop))
565  {
566  b0->error =
567  node->errors[NAT44_CLASSIFY_ERROR_MAX_FRAG];
569  ("maximum fragments per reassembly exceeded");
570  next0 = NAT_NEXT_DROP;
571  goto enqueue0;
572  }
573  cached0 = 1;
574  goto enqueue0;
575  }
576  else if (reass0->classify_next ==
579  else if (reass0->classify_next ==
582  }
583  }
584 
585  enqueue0:
587  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
588  {
590  vlib_add_trace (vm, node, b0, sizeof (*t));
591  t->cached = cached0;
592  if (!cached0)
593  t->next_in2out =
594  next0 == NAT_NEXT_IN2OUT_ED_FAST_PATH ? 1 : 0;
595  }
596 
597  if (cached0)
598  {
599  n_left_to_next++;
600  to_next--;
601  frag_cached++;
602  }
603  else
604  {
605  next_in2out += next0 == NAT_NEXT_IN2OUT_ED_FAST_PATH;
606  next_out2in += next0 == NAT_NEXT_OUT2IN_ED_FAST_PATH;
607 
608  /* verify speculative enqueue, maybe switch current next frame */
609  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
610  to_next, n_left_to_next,
611  bi0, next0);
612  }
613 
614  if (n_left_from == 0 && vec_len (fragments_to_loopback))
615  {
616  in_loopback = 1;
617  from = vlib_frame_vector_args (frame);
618  u32 len = vec_len (fragments_to_loopback);
619  if (len <= VLIB_FRAME_SIZE)
620  {
621  clib_memcpy_fast (from, fragments_to_loopback,
622  sizeof (u32) * len);
623  n_left_from = len;
624  vec_reset_length (fragments_to_loopback);
625  }
626  else
627  {
628  clib_memcpy_fast (from, fragments_to_loopback +
629  (len - VLIB_FRAME_SIZE),
630  sizeof (u32) * VLIB_FRAME_SIZE);
631  n_left_from = VLIB_FRAME_SIZE;
632  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
633  }
634  }
635  }
636 
637  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
638  }
639 
640  nat_send_all_to_node (vm, fragments_to_drop, node, 0,
642 
643  vec_free (fragments_to_drop);
644 
646  NAT44_CLASSIFY_ERROR_NEXT_IN2OUT, next_in2out);
648  NAT44_CLASSIFY_ERROR_NEXT_OUT2IN, next_out2in);
650  NAT44_CLASSIFY_ERROR_FRAG_CACHED, frag_cached);
651 
652  return frame->n_vectors;
653 }
654 
656  vlib_node_runtime_t * node,
657  vlib_frame_t * frame)
658 {
659  return nat44_classify_node_fn_inline (vm, node, frame);
660 }
661 
662 /* *INDENT-OFF* */
664  .name = "nat44-classify",
665  .vector_size = sizeof (u32),
666  .format_trace = format_nat44_classify_trace,
669  .error_strings = nat44_classify_error_strings,
670  .n_next_nodes = NAT44_CLASSIFY_N_NEXT,
671  .next_nodes = {
672  [NAT44_CLASSIFY_NEXT_IN2OUT] = "nat44-in2out",
673  [NAT44_CLASSIFY_NEXT_OUT2IN] = "nat44-out2in",
674  [NAT44_CLASSIFY_NEXT_DROP] = "error-drop",
675  },
676 };
677 /* *INDENT-ON* */
678 
680  vlib_node_runtime_t * node,
681  vlib_frame_t * frame)
682 {
683  return nat44_ed_classify_node_fn_inline (vm, node, frame);
684 }
685 
686 /* *INDENT-OFF* */
688  .name = "nat44-ed-classify",
689  .vector_size = sizeof (u32),
690  .sibling_of = "nat-default",
691  .format_trace = format_nat44_classify_trace,
693 };
694 /* *INDENT-ON* */
695 
697  vlib_node_runtime_t * node,
698  vlib_frame_t * frame)
699 {
700  return nat44_classify_node_fn_inline (vm, node, frame);
701 }
702 
703 /* *INDENT-OFF* */
705  .name = "nat44-det-classify",
706  .vector_size = sizeof (u32),
707  .format_trace = format_nat44_classify_trace,
709  .n_next_nodes = NAT44_CLASSIFY_N_NEXT,
710  .next_nodes = {
711  [NAT44_CLASSIFY_NEXT_IN2OUT] = "nat44-det-in2out",
712  [NAT44_CLASSIFY_NEXT_OUT2IN] = "nat44-det-out2in",
713  [NAT44_CLASSIFY_NEXT_DROP] = "error-drop",
714  },
715 };
716 /* *INDENT-ON* */
717 
719  vlib_node_runtime_t * node,
720  vlib_frame_t * frame)
721 {
722  return nat44_classify_node_fn_inline (vm, node, frame);
723 }
724 
725 /* *INDENT-OFF* */
727  .name = "nat44-handoff-classify",
728  .vector_size = sizeof (u32),
729  .format_trace = format_nat44_classify_trace,
731  .n_next_nodes = NAT44_CLASSIFY_N_NEXT,
732  .next_nodes = {
733  [NAT44_CLASSIFY_NEXT_IN2OUT] = "nat44-in2out-worker-handoff",
734  [NAT44_CLASSIFY_NEXT_OUT2IN] = "nat44-out2in-worker-handoff",
735  [NAT44_CLASSIFY_NEXT_DROP] = "error-drop",
736  },
737 };
738 
739 /* *INDENT-ON* */
740 
741 /*
742  * fd.io coding-style-patch-verification: ON
743  *
744  * Local Variables:
745  * eval: (c-set-style "gnu")
746  * End:
747  */
ip4_address_t external_addr
Definition: nat.h:445
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
#define nat_buffer_opaque(b)
Definition: nat.h:73
nat44_classify_next_t
int nat_ip4_reass_add_fragment(u32 thread_index, nat_reass_ip4_t *reass, u32 bi, u32 **bi_to_drop)
Cache fragment.
Definition: nat_reass.c:392
#define CLIB_UNUSED(x)
Definition: clib.h:83
ip4_address_t src_address
Definition: ip4_packet.h:170
#define nat_elog_notice(nat_elog_str)
Definition: nat.h:1017
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
vlib_node_registration_t nat44_ed_classify_node
(constructor) VLIB_REGISTER_NODE (nat44_ed_classify_node)
u32 fib_table_get_index_for_sw_if_index(fib_protocol_t proto, u32 sw_if_index)
Get the index of the FIB bound to the interface.
Definition: fib_table.c:972
u32 thread_index
Definition: main.h:218
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
static u8 * format_nat44_classify_trace(u8 *s, va_list *args)
#define VLIB_NODE_FN(node)
Definition: node.h:202
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:470
unsigned char u8
Definition: types.h:56
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
static int ip4_is_fragment(const ip4_header_t *i)
Definition: ip4_packet.h:213
ip4_address_t dst_address
Definition: ip4_packet.h:170
static void * ip4_next_header(ip4_header_t *i)
Definition: ip4_packet.h:241
unsigned int u32
Definition: types.h:88
#define NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE
Definition: nat_reass.h:34
ip4_address_t local_addr
Definition: nat.h:443
#define VLIB_FRAME_SIZE
Definition: node.h:378
vl_api_fib_path_type_t type
Definition: fib_types.api:123
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:514
u64 key
the key
Definition: bihash_8_8.h:35
u16 protocol
Definition: nat.h:94
snat_static_mapping_t * static_mappings
Definition: nat.h:573
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
#define PREDICT_FALSE(x)
Definition: clib.h:112
clib_bihash_8_8_t static_mapping_by_external
Definition: nat.h:570
vlib_node_registration_t nat44_handoff_classify_node
(constructor) VLIB_REGISTER_NODE (nat44_handoff_classify_node)
u32 node_index
Node index.
Definition: node.h:496
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:338
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1150
u8 len
Definition: ip_types.api:90
static uword nat44_classify_node_fn_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
snat_main_t snat_main
Definition: nat.c:39
u64 value
the value
Definition: bihash_8_8.h:36
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
u16 n_vectors
Definition: node.h:397
vlib_main_t * vm
Definition: buffer.c:323
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:341
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:302
8 octet key, 8 octet key value pair
Definition: bihash_8_8.h:33
#define ARRAY_LEN(x)
Definition: clib.h:63
ip4_address_t addr
Definition: nat.h:92
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:456
vlib_node_registration_t nat44_classify_node
(constructor) VLIB_REGISTER_NODE (nat44_classify_node)
static void make_ed_kv(clib_bihash_kv_16_8_t *kv, ip4_address_t *l_addr, ip4_address_t *r_addr, u8 proto, u32 fib_index, u16 l_port, u16 r_port)
Definition: nat_inlines.h:437
static char * nat44_classify_error_strings[]
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:515
ip4_address_t addr
Definition: nat.h:358
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
static int ip4_is_first_fragment(const ip4_header_t *i)
Definition: ip4_packet.h:220
static u32 ip_proto_to_snat_proto(u8 ip_proto)
Definition: nat_inlines.h:147
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
snat_main_per_thread_data_t * per_thread_data
Definition: nat.h:564
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:244
nat_reass_ip4_t * nat_ip4_reass_find_or_create(ip4_address_t src, ip4_address_t dst, u16 frag_id, u8 proto, u8 reset_timeout, u32 **bi_to_drop)
Find or create reassembly.
Definition: nat_reass.c:274
snat_address_t * addresses
Definition: nat.h:580
vlib_node_registration_t nat44_det_classify_node
(constructor) VLIB_REGISTER_NODE (nat44_det_classify_node)
nat44_classify_error_t
#define vnet_buffer(b)
Definition: buffer.h:365
#define vec_foreach(var, vec)
Vector iterator.
u16 flags
Copy of main node flags.
Definition: node.h:509
static void nat_send_all_to_node(vlib_main_t *vm, u32 *bi_vector, vlib_node_runtime_t *node, vlib_error_t *error, u32 next)
Definition: nat_inlines.h:225
clib_bihash_16_8_t in2out_ed
Definition: nat.h:502
void nat_ip4_reass_get_frags(nat_reass_ip4_t *reass, u32 **bi)
Get cached fragments.
Definition: nat_reass.c:424
NAT plugin virtual fragmentation reassembly.
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:302
#define foreach_nat44_classify_error
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
static uword nat44_ed_classify_node_fn_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: defs.h:46
u16 fib_index
Definition: nat.h:94
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:128