FD.io VPP  v21.01.1
Vector Packet Processing
nat44_handoff.c
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief NAT44 worker handoff
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/handoff.h>
#include <vnet/fib/ip4_fib.h>
#include <vppinfra/error.h>
#include <nat/nat.h>
#include <nat/nat_inlines.h>

typedef struct
{
  u32 next_worker_index;
  u32 trace_index;
  u8 in2out;
  u8 output;
} nat44_handoff_trace_t;

static char *nat44_handoff_error_strings[] = {
#define _(sym,string) string,
  foreach_nat44_handoff_error
#undef _
};

static u8 *
format_nat44_handoff_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  nat44_handoff_trace_t *t = va_arg (*args, nat44_handoff_trace_t *);
  char *tag, *output;

  tag = t->in2out ? "IN2OUT" : "OUT2IN";
  output = t->output ? "OUTPUT-FEATURE" : "";
  s =
    format (s, "NAT44_%s_WORKER_HANDOFF %s: next-worker %d trace index %d",
            tag, output, t->next_worker_index, t->trace_index);

  return s;
}

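/*
 * Shared worker-handoff logic for all three NAT44 handoff nodes.  Each
 * packet is mapped to the worker thread that owns its NAT state, and the
 * buffers are enqueued to that worker's frame queue; packets that already
 * sit on the right thread and packets that are handed off are counted
 * separately.
 */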
static inline uword
nat44_worker_handoff_fn_inline (vlib_main_t * vm,
                                vlib_node_runtime_t * node,
                                vlib_frame_t * frame, u8 is_output,
                                u8 is_in2out)
{
  u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;

  u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  snat_main_t *sm = &snat_main;

  u32 fq_index, thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left_from);

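  /*
   * Pick the frame queue that feeds the target NAT node: separate queues
   * exist for in2out, in2out-output and out2in, so handed-off packets
   * resume processing in the matching direction on the remote worker.
   */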
  if (is_in2out)
    {
      fq_index = is_output ? sm->fq_in2out_output_index : sm->fq_in2out_index;
    }
  else
    {
      fq_index = sm->fq_out2in_index;
    }

  while (n_left_from >= 4)
    {
      u32 arc_next0, arc_next1, arc_next2, arc_next3;
      u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
      u32 rx_fib_index0, rx_fib_index1, rx_fib_index2, rx_fib_index3;
      u32 iph_offset0 = 0, iph_offset1 = 0, iph_offset2 = 0, iph_offset3 = 0;
      ip4_header_t *ip0, *ip1, *ip2, *ip3;

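      /*
       * Prefetch the headers and data of the next four buffers so their
       * cache lines are warm when the next loop iteration needs them.
       */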
      if (PREDICT_TRUE (n_left_from >= 8))
        {
          vlib_prefetch_buffer_header (b[4], LOAD);
          vlib_prefetch_buffer_header (b[5], LOAD);
          vlib_prefetch_buffer_header (b[6], LOAD);
          vlib_prefetch_buffer_header (b[7], LOAD);
          CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, LOAD);
        }

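      /*
       * On the output feature arc the buffer's current data already points
       * at the rewritten L2 header, so the IP header sits
       * save_rewrite_length bytes further in.
       */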
      if (is_output)
        {
          iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
          iph_offset1 = vnet_buffer (b[1])->ip.save_rewrite_length;
          iph_offset2 = vnet_buffer (b[2])->ip.save_rewrite_length;
          iph_offset3 = vnet_buffer (b[3])->ip.save_rewrite_length;
        }

      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
                              iph_offset0);
      ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[1]) +
                              iph_offset1);
      ip2 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[2]) +
                              iph_offset2);
      ip3 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[3]) +
                              iph_offset3);

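      /*
       * Resolve the next node on the feature arc now and stash it in the
       * buffer metadata so the worker that receives the handed-off packet
       * can continue along the same arc.
       */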
      vnet_feature_next (&arc_next0, b[0]);
      vnet_feature_next (&arc_next1, b[1]);
      vnet_feature_next (&arc_next2, b[2]);
      vnet_feature_next (&arc_next3, b[3]);

      vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
      vnet_buffer2 (b[1])->nat.arc_next = arc_next1;
      vnet_buffer2 (b[2])->nat.arc_next = arc_next2;
      vnet_buffer2 (b[3])->nat.arc_next = arc_next3;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
      rx_fib_index1 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
      rx_fib_index2 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
      rx_fib_index3 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);

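      /*
       * Ask the configured worker-selection callback which thread owns the
       * NAT state for each packet; in2out selection needs only the inner IP
       * header, while out2in selection may also inspect the buffer.
       */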
      if (is_in2out)
        {
          ti[0] = sm->worker_in2out_cb (ip0, rx_fib_index0, is_output);
          ti[1] = sm->worker_in2out_cb (ip1, rx_fib_index1, is_output);
          ti[2] = sm->worker_in2out_cb (ip2, rx_fib_index2, is_output);
          ti[3] = sm->worker_in2out_cb (ip3, rx_fib_index3, is_output);
        }
      else
        {
          ti[0] = sm->worker_out2in_cb (b[0], ip0, rx_fib_index0, is_output);
          ti[1] = sm->worker_out2in_cb (b[1], ip1, rx_fib_index1, is_output);
          ti[2] = sm->worker_out2in_cb (b[2], ip2, rx_fib_index2, is_output);
          ti[3] = sm->worker_out2in_cb (b[3], ip3, rx_fib_index3, is_output);
        }

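      /* Count packets that stay on this thread vs. those handed off. */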
      if (ti[0] == thread_index)
        same_worker++;
      else
        do_handoff++;

      if (ti[1] == thread_index)
        same_worker++;
      else
        do_handoff++;

      if (ti[2] == thread_index)
        same_worker++;
      else
        do_handoff++;

      if (ti[3] == thread_index)
        same_worker++;
      else
        do_handoff++;

      b += 4;
      ti += 4;
      n_left_from -= 4;
    }

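  /* Handle the remaining 0-3 packets one at a time, mirroring the quad loop. */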
  while (n_left_from > 0)
    {
      u32 arc_next0;
      u32 sw_if_index0;
      u32 rx_fib_index0;
      u32 iph_offset0 = 0;
      ip4_header_t *ip0;


      if (is_output)
        iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;

      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
                              iph_offset0);

      vnet_feature_next (&arc_next0, b[0]);
      vnet_buffer2 (b[0])->nat.arc_next = arc_next0;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);

      if (is_in2out)
        {
          ti[0] = sm->worker_in2out_cb (ip0, rx_fib_index0, is_output);
        }
      else
        {
          ti[0] = sm->worker_out2in_cb (b[0], ip0, rx_fib_index0, is_output);
        }

      if (ti[0] == thread_index)
        same_worker++;
      else
        do_handoff++;

      b += 1;
      ti += 1;
      n_left_from -= 1;
    }

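  /*
   * If tracing is enabled, record the chosen worker for every traced buffer
   * at the head of the frame; tracing stops at the first untraced buffer.
   */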
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    {
      u32 i;
      b = bufs;
      ti = thread_indices;

      for (i = 0; i < frame->n_vectors; i++)
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              nat44_handoff_trace_t *t =
                vlib_add_trace (vm, node, b[0], sizeof (*t));
              t->next_worker_index = ti[0];
              t->trace_index = vlib_buffer_get_trace_index (b[0]);
              t->in2out = is_in2out;
              t->output = is_output;

              b += 1;
              ti += 1;
            }
          else
            break;
        }
    }

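  /*
   * Enqueue the whole frame to the per-worker frame queues.  With
   * drop_on_congestion set, buffers that cannot be enqueued are dropped
   * and the shortfall is reported as congestion drops.
   */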
  n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
                                         frame->n_vectors, 1);

  if (n_enq < frame->n_vectors)
    {
      vlib_node_increment_counter (vm, node->node_index,
                                   NAT44_HANDOFF_ERROR_CONGESTION_DROP,
                                   frame->n_vectors - n_enq);
    }

  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_HANDOFF_ERROR_SAME_WORKER, same_worker);
  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_HANDOFF_ERROR_DO_HANDOFF, do_handoff);
  return frame->n_vectors;
}

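/*
 * Three thin node functions wrap the inline above with the appropriate
 * (is_output, is_in2out) flags: in2out, in2out-output and out2in handoff.
 */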
VLIB_NODE_FN (snat_in2out_worker_handoff_node) (vlib_main_t * vm,
                                                vlib_node_runtime_t * node,
                                                vlib_frame_t * frame)
{
  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node) = {
  .name = "nat44-in2out-worker-handoff",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
  .error_strings = nat44_handoff_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (snat_in2out_output_worker_handoff_node) (vlib_main_t * vm,
                                                       vlib_node_runtime_t *
                                                       node,
                                                       vlib_frame_t * frame)
{
  return nat44_worker_handoff_fn_inline (vm, node, frame, 1, 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (snat_in2out_output_worker_handoff_node) = {
  .name = "nat44-in2out-output-worker-handoff",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
  .error_strings = nat44_handoff_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (snat_out2in_worker_handoff_node) (vlib_main_t * vm,
                                                vlib_node_runtime_t * node,
                                                vlib_frame_t * frame)
{
  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (snat_out2in_worker_handoff_node) = {
  .name = "nat44-out2in-worker-handoff",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
  .error_strings = nat44_handoff_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */