FD.io VPP  v18.04-17-g3a0d853
Vector Packet Processing
handoff.c
Go to the documentation of this file.
1 
2 /*
3  * Copyright (c) 2016 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <vnet/vnet.h>
18 #include <vppinfra/xxhash.h>
19 #include <vlib/threads.h>
20 #include <vnet/handoff.h>
21 #include <vnet/feature/feature.h>
22 
23 typedef struct
24 {
28 
29 typedef struct
30 {
34 
36 
37  /* Worker handoff index */
39 
40  /* convenience variables */
43 
44  u64 (*hash_fn) (ethernet_header_t *);
46 
49 
50 typedef struct
51 {
56 
57 /* packet trace format function */
58 static u8 *
59 format_worker_handoff_trace (u8 * s, va_list * args)
60 {
61  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
62  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
63  worker_handoff_trace_t *t = va_arg (*args, worker_handoff_trace_t *);
64 
65  s =
66  format (s, "worker-handoff: sw_if_index %d, next_worker %d, buffer 0x%x",
68  return s;
69 }
70 
72 
73 static uword
75  vlib_node_runtime_t * node, vlib_frame_t * frame)
76 {
79  u32 n_left_from, *from;
80  static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index;
81  static __thread vlib_frame_queue_t **congested_handoff_queue_by_worker_index
82  = 0;
83  vlib_frame_queue_elt_t *hf = 0;
84  int i;
85  u32 n_left_to_next_worker = 0, *to_next_worker = 0;
86  u32 next_worker_index = 0;
87  u32 current_worker_index = ~0;
88 
89  if (PREDICT_FALSE (handoff_queue_elt_by_worker_index == 0))
90  {
91  vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);
92 
93  vec_validate_init_empty (congested_handoff_queue_by_worker_index,
94  hm->first_worker_index + hm->num_workers - 1,
95  (vlib_frame_queue_t *) (~0));
96  }
97 
98  from = vlib_frame_vector_args (frame);
99  n_left_from = frame->n_vectors;
100 
101  while (n_left_from > 0)
102  {
103  u32 bi0;
104  vlib_buffer_t *b0;
105  u32 sw_if_index0;
106  u32 hash;
107  u64 hash_key;
109  u32 index0;
110 
111  bi0 = from[0];
112  from += 1;
113  n_left_from -= 1;
114 
115  b0 = vlib_get_buffer (vm, bi0);
116  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
117  ASSERT (hm->if_data);
118  ihd0 = vec_elt_at_index (hm->if_data, sw_if_index0);
119 
120  next_worker_index = hm->first_worker_index;
121 
122  /*
123  * Force unknown traffic onto worker 0,
124  * and into ethernet-input. $$$$ add more hashes.
125  */
126 
127  /* Compute ingress LB hash */
128  hash_key = hm->hash_fn ((ethernet_header_t *) b0->data);
129  hash = (u32) clib_xxhash (hash_key);
130 
131  /* if input node did not specify next index, then packet
132  should go to eternet-input */
133  if (PREDICT_FALSE ((b0->flags & VNET_BUFFER_F_HANDOFF_NEXT_VALID) == 0))
134  vnet_buffer (b0)->handoff.next_index =
136  else if (vnet_buffer (b0)->handoff.next_index ==
138  || vnet_buffer (b0)->handoff.next_index ==
140  || vnet_buffer (b0)->handoff.next_index ==
142  vlib_buffer_advance (b0, (sizeof (ethernet_header_t)));
143 
144  if (PREDICT_TRUE (is_pow2 (vec_len (ihd0->workers))))
145  index0 = hash & (vec_len (ihd0->workers) - 1);
146  else
147  index0 = hash % vec_len (ihd0->workers);
148 
149  next_worker_index += ihd0->workers[index0];
150 
151  if (next_worker_index != current_worker_index)
152  {
153  if (hf)
154  hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
155 
157  next_worker_index,
158  handoff_queue_elt_by_worker_index);
159 
160  n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
161  to_next_worker = &hf->buffer_index[hf->n_vectors];
162  current_worker_index = next_worker_index;
163  }
164 
165  /* enqueue to correct worker thread */
166  to_next_worker[0] = bi0;
167  to_next_worker++;
168  n_left_to_next_worker--;
169 
170  if (n_left_to_next_worker == 0)
171  {
174  current_worker_index = ~0;
175  handoff_queue_elt_by_worker_index[next_worker_index] = 0;
176  hf = 0;
177  }
178 
180  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
181  {
183  vlib_add_trace (vm, node, b0, sizeof (*t));
184  t->sw_if_index = sw_if_index0;
185  t->next_worker_index = next_worker_index - hm->first_worker_index;
186  t->buffer_index = bi0;
187  }
188 
189  }
190 
191  if (hf)
192  hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
193 
194  /* Ship frames to the worker nodes */
195  for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
196  {
197  if (handoff_queue_elt_by_worker_index[i])
198  {
199  hf = handoff_queue_elt_by_worker_index[i];
200  /*
201  * It works better to let the handoff node
202  * rate-adapt, always ship the handoff queue element.
203  */
204  if (1 || hf->n_vectors == hf->last_n_vectors)
205  {
207  handoff_queue_elt_by_worker_index[i] = 0;
208  }
209  else
210  hf->last_n_vectors = hf->n_vectors;
211  }
212  congested_handoff_queue_by_worker_index[i] =
213  (vlib_frame_queue_t *) (~0);
214  }
215  hf = 0;
216  current_worker_index = ~0;
217  return frame->n_vectors;
218 }
219 
220 /* *INDENT-OFF* */
222  .function = worker_handoff_node_fn,
223  .name = "worker-handoff",
224  .vector_size = sizeof (u32),
225  .format_trace = format_worker_handoff_trace,
226  .type = VLIB_NODE_TYPE_INTERNAL,
227 
228  .n_next_nodes = 1,
229  .next_nodes = {
230  [0] = "error-drop",
231  },
232 };
233 
235 /* *INDENT-ON* */
236 
237 int
239  uword * bitmap, int enable_disable)
240 {
243  vnet_main_t *vnm = vnet_get_main ();
245  int i, rv = 0;
246 
247  if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
248  return VNET_API_ERROR_INVALID_SW_IF_INDEX;
249 
250  sw = vnet_get_sw_interface (vnm, sw_if_index);
252  return VNET_API_ERROR_INVALID_SW_IF_INDEX;
253 
254  if (clib_bitmap_last_set (bitmap) >= hm->num_workers)
255  return VNET_API_ERROR_INVALID_WORKER;
256 
257  if (hm->frame_queue_index == ~0)
258  hm->frame_queue_index =
260 
261  vec_validate (hm->if_data, sw_if_index);
262  d = vec_elt_at_index (hm->if_data, sw_if_index);
263 
264  vec_free (d->workers);
266 
267  if (enable_disable)
268  {
269  d->workers_bitmap = bitmap;
270  /* *INDENT-OFF* */
271  clib_bitmap_foreach (i, bitmap,
272  ({
273  vec_add1(d->workers, i);
274  }));
275  /* *INDENT-ON* */
276  }
277 
278  vnet_feature_enable_disable ("device-input", "worker-handoff",
279  sw_if_index, enable_disable, 0, 0);
280  return rv;
281 }
282 
283 static clib_error_t *
285  unformat_input_t * input,
286  vlib_cli_command_t * cmd)
287 {
289  u32 sw_if_index = ~0;
290  int enable_disable = 1;
291  uword *bitmap = 0;
292  u32 sym = ~0;
293 
294  int rv = 0;
295 
297  {
298  if (unformat (input, "disable"))
299  enable_disable = 0;
300  else if (unformat (input, "workers %U", unformat_bitmap_list, &bitmap))
301  ;
302  else if (unformat (input, "%U", unformat_vnet_sw_interface,
303  vnet_get_main (), &sw_if_index))
304  ;
305  else if (unformat (input, "symmetrical"))
306  sym = 1;
307  else if (unformat (input, "asymmetrical"))
308  sym = 0;
309  else
310  break;
311  }
312 
313  if (sw_if_index == ~0)
314  return clib_error_return (0, "Please specify an interface...");
315 
316  if (bitmap == 0)
317  return clib_error_return (0, "Please specify list of workers...");
318 
319  rv =
320  interface_handoff_enable_disable (vm, sw_if_index, bitmap,
321  enable_disable);
322 
323  switch (rv)
324  {
325  case 0:
326  break;
327 
328  case VNET_API_ERROR_INVALID_SW_IF_INDEX:
329  return clib_error_return (0, "Invalid interface");
330  break;
331 
332  case VNET_API_ERROR_INVALID_WORKER:
333  return clib_error_return (0, "Invalid worker(s)");
334  break;
335 
336  case VNET_API_ERROR_UNIMPLEMENTED:
337  return clib_error_return (0,
338  "Device driver doesn't support redirection");
339  break;
340 
341  default:
342  return clib_error_return (0, "unknown return value %d", rv);
343  }
344 
345  if (sym == 1)
346  hm->hash_fn = eth_get_sym_key;
347  else if (sym == 0)
348  hm->hash_fn = eth_get_key;
349 
350  return 0;
351 }
352 
353 /* *INDENT-OFF* */
354 VLIB_CLI_COMMAND (set_interface_handoff_command, static) = {
355  .path = "set interface handoff",
356  .short_help =
357  "set interface handoff <interface-name> workers <workers-list> [symmetrical|asymmetrical]",
359 };
360 /* *INDENT-ON* */
361 
362 typedef struct
363 {
368 
369 /* packet trace format function */
370 static u8 *
371 format_handoff_dispatch_trace (u8 * s, va_list * args)
372 {
373  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
374  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
375  handoff_dispatch_trace_t *t = va_arg (*args, handoff_dispatch_trace_t *);
376 
377  s = format (s, "handoff-dispatch: sw_if_index %d next_index %d buffer 0x%x",
378  t->sw_if_index, t->next_index, t->buffer_index);
379  return s;
380 }
381 
/* Error counter table for handoff-dispatch: _(symbol, counter string). */
#define foreach_handoff_dispatch_error \
_(EXAMPLE, "example packets")
384 
385 typedef enum
386 {
387 #define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
389 #undef _
392 
394 #define _(sym,string) string,
396 #undef _
397 };
398 
399 static uword
401  vlib_node_runtime_t * node, vlib_frame_t * frame)
402 {
403  u32 n_left_from, *from, *to_next;
404  handoff_dispatch_next_t next_index;
405 
406  from = vlib_frame_vector_args (frame);
407  n_left_from = frame->n_vectors;
408  next_index = node->cached_next_index;
409 
410  while (n_left_from > 0)
411  {
412  u32 n_left_to_next;
413 
414  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
415 
416  while (n_left_from >= 4 && n_left_to_next >= 2)
417  {
418  u32 bi0, bi1;
419  vlib_buffer_t *b0, *b1;
420  u32 next0, next1;
421  u32 sw_if_index0, sw_if_index1;
422 
423  /* Prefetch next iteration. */
424  {
425  vlib_buffer_t *p2, *p3;
426 
427  p2 = vlib_get_buffer (vm, from[2]);
428  p3 = vlib_get_buffer (vm, from[3]);
429 
430  vlib_prefetch_buffer_header (p2, LOAD);
431  vlib_prefetch_buffer_header (p3, LOAD);
432  }
433 
434  /* speculatively enqueue b0 and b1 to the current next frame */
435  to_next[0] = bi0 = from[0];
436  to_next[1] = bi1 = from[1];
437  from += 2;
438  to_next += 2;
439  n_left_from -= 2;
440  n_left_to_next -= 2;
441 
442  b0 = vlib_get_buffer (vm, bi0);
443  b1 = vlib_get_buffer (vm, bi1);
444 
445  next0 = vnet_buffer (b0)->handoff.next_index;
446  next1 = vnet_buffer (b1)->handoff.next_index;
447 
449  {
450  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
451  {
452  vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */
453  0);
455  vlib_add_trace (vm, node, b0, sizeof (*t));
456  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
457  t->sw_if_index = sw_if_index0;
458  t->next_index = next0;
459  t->buffer_index = bi0;
460  }
461  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
462  {
463  vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */
464  0);
466  vlib_add_trace (vm, node, b1, sizeof (*t));
467  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
468  t->sw_if_index = sw_if_index1;
469  t->next_index = next1;
470  t->buffer_index = bi1;
471  }
472  }
473 
474  /* verify speculative enqueues, maybe switch current next frame */
475  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
476  to_next, n_left_to_next,
477  bi0, bi1, next0, next1);
478  }
479 
480  while (n_left_from > 0 && n_left_to_next > 0)
481  {
482  u32 bi0;
483  vlib_buffer_t *b0;
484  u32 next0;
485  u32 sw_if_index0;
486 
487  /* speculatively enqueue b0 to the current next frame */
488  bi0 = from[0];
489  to_next[0] = bi0;
490  from += 1;
491  to_next += 1;
492  n_left_from -= 1;
493  n_left_to_next -= 1;
494 
495  b0 = vlib_get_buffer (vm, bi0);
496 
497  next0 = vnet_buffer (b0)->handoff.next_index;
498 
500  {
501  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
502  {
503  vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */
504  0);
506  vlib_add_trace (vm, node, b0, sizeof (*t));
507  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
508  t->sw_if_index = sw_if_index0;
509  t->next_index = next0;
510  t->buffer_index = bi0;
511  }
512  }
513 
514  /* verify speculative enqueue, maybe switch current next frame */
515  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
516  to_next, n_left_to_next,
517  bi0, next0);
518  }
519 
520  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
521  }
522 
523  return frame->n_vectors;
524 }
525 
526 /* *INDENT-OFF* */
528  .function = handoff_dispatch_node_fn,
529  .name = "handoff-dispatch",
530  .vector_size = sizeof (u32),
531  .format_trace = format_handoff_dispatch_trace,
532  .type = VLIB_NODE_TYPE_INTERNAL,
534 
536  .error_strings = handoff_dispatch_error_strings,
537 
538  .n_next_nodes = HANDOFF_DISPATCH_N_NEXT,
539 
540  .next_nodes = {
541  [HANDOFF_DISPATCH_NEXT_DROP] = "error-drop",
542  [HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT] = "ethernet-input",
543  [HANDOFF_DISPATCH_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
544  [HANDOFF_DISPATCH_NEXT_IP6_INPUT] = "ip6-input",
545  [HANDOFF_DISPATCH_NEXT_MPLS_INPUT] = "mpls-input",
546  },
547 };
548 
550 /* *INDENT-ON* */
551 
552 clib_error_t *
554 {
557  clib_error_t *error;
558  uword *p;
559 
560  if ((error = vlib_call_init_function (vm, threads_init)))
561  return error;
562 
564  /* Only the standard vnet worker threads are supported */
565  p = hash_get_mem (tm->thread_registrations_by_name, "workers");
566  if (p)
567  {
568  tr = (vlib_thread_registration_t *) p[0];
569  if (tr)
570  {
571  hm->num_workers = tr->count;
573  }
574  }
575 
576  hm->hash_fn = eth_get_key;
577 
578  hm->vlib_main = vm;
579  hm->vnet_main = &vnet_main;
580 
581  hm->frame_queue_index = ~0;
582 
583  return 0;
584 }

VLIB_INIT_FUNCTION (handoff_init);

588 /*
589  * fd.io coding-style-patch-verification: ON
590  *
591  * Local Variables:
592  * eval: (c-set-style "gnu")
593  * End:
594  */
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:434
u32 cached_next_index
Definition: handoff.c:31
u32 trace_active_hint
Definition: trace.h:83
#define CLIB_UNUSED(x)
Definition: clib.h:79
handoff_main_t handoff_main
Definition: handoff.c:47
u64(* hash_fn)(ethernet_header_t *)
Definition: handoff.c:44
u32 num_workers
Definition: handoff.c:32
vnet_main_t * vnet_get_main(void)
Definition: misc.c:47
vnet_interface_main_t interface_main
Definition: vnet.h:56
#define PREDICT_TRUE(x)
Definition: clib.h:106
clib_error_t * threads_init(vlib_main_t *vm)
Definition: threads.c:1816
u32 vlib_frame_queue_main_init(u32 node_index, u32 frame_queue_nelts)
Definition: threads.c:1754
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:520
static u64 clib_xxhash(u64 key)
Definition: xxhash.h:58
int i
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:419
unformat_function_t unformat_vnet_sw_interface
u32 buffer_index[VLIB_FRAME_SIZE]
Definition: threads.h:98
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:104
handoff_dispatch_error_t
Definition: handoff.c:385
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:111
static u8 * format_worker_handoff_trace(u8 *s, va_list *args)
Definition: handoff.c:59
clib_error_t * handoff_init(vlib_main_t *vm)
Definition: handoff.c:553
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:191
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define clib_error_return(e, args...)
Definition: error.h:99
unsigned long u64
Definition: types.h:89
static u8 * format_handoff_dispatch_trace(u8 *s, va_list *args)
Definition: handoff.c:371
#define vlib_call_init_function(vm, x)
Definition: init.h:162
#define VLIB_FRAME_SIZE
Definition: node.h:328
u32 first_worker_index
Definition: handoff.c:33
#define VLIB_NODE_FUNCTION_MULTIARCH(node, fn)
Definition: node.h:158
#define clib_bitmap_foreach(i, ai, body)
Macro to iterate across set bits in a bitmap.
Definition: bitmap.h:361
vlib_node_registration_t handoff_dispatch_node
(constructor) VLIB_REGISTER_NODE (handoff_dispatch_node)
Definition: handoff.c:48
static uword clib_bitmap_last_set(uword *ai)
Return the higest numbered set bit in a bitmap.
Definition: bitmap.h:402
vlib_node_registration_t handoff_node
Definition: handoff.c:71
struct _unformat_input_t unformat_input_t
#define PREDICT_FALSE(x)
Definition: clib.h:105
u32 frame_queue_index
Definition: handoff.c:38
static vlib_frame_queue_elt_t * vlib_get_worker_handoff_queue_elt(u32 frame_queue_index, u32 vlib_worker_index, vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index)
Definition: threads.h:502
vnet_main_t vnet_main
Definition: misc.c:43
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
per_inteface_handoff_data_t * if_data
Definition: handoff.c:35
vnet_main_t * vnet_main
Definition: handoff.c:42
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:364
static uword handoff_dispatch_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: handoff.c:400
static u64 eth_get_sym_key(ethernet_header_t *h0)
Definition: handoff.h:134
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:143
#define UNFORMAT_END_OF_INPUT
Definition: format.h:143
u16 n_vectors
Definition: node.h:344
vlib_main_t * vm
Definition: buffer.c:294
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:336
#define pool_is_free_index(P, I)
Use free bitmap to query whether given index is free.
Definition: pool.h:270
#define foreach_handoff_dispatch_error
Definition: handoff.c:382
#define ARRAY_LEN(x)
Definition: clib.h:59
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:454
#define VLIB_CLI_COMMAND(x,...)
Definition: cli.h:154
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:456
static clib_error_t * set_interface_handoff_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
Definition: handoff.c:284
#define ASSERT(truth)
unsigned int u32
Definition: types.h:88
vlib_main_t * vlib_main
Definition: handoff.c:41
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:222
static char * handoff_dispatch_error_strings[]
Definition: handoff.c:393
vlib_trace_main_t trace_main
Definition: main.h:138
uword * thread_registrations_by_name
Definition: threads.h:297
static uword is_pow2(uword x)
Definition: clib.h:280
u64 uword
Definition: types.h:112
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
struct _vlib_node_registration vlib_node_registration_t
#define VLIB_NODE_FLAG_IS_HANDOFF
Definition: node.h:256
static uword worker_handoff_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: handoff.c:74
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
unsigned char u8
Definition: types.h:56
handoff_dispatch_next_t
Definition: handoff.h:25
static uword unformat_bitmap_list(unformat_input_t *input, va_list *va)
unformat a list of bit ranges into a bitmap (eg "0-3,5-7,11" )
Definition: bitmap.h:693
vnet_sw_interface_t * sw_interfaces
Definition: interface.h:709
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:267
static void vlib_put_frame_queue_elt(vlib_frame_queue_elt_t *hf)
Definition: threads.h:432
#define hash_get_mem(h, key)
Definition: hash.h:268
static u64 eth_get_key(ethernet_header_t *h0)
Definition: handoff.h:200
#define vnet_buffer(b)
Definition: buffer.h:372
vnet_sw_interface_type_t type
Definition: interface.h:583
static vlib_thread_main_t * vlib_get_thread_main()
Definition: global_funcs.h:32
u8 data[0]
Packet data.
Definition: buffer.h:179
u16 flags
Copy of main node flags.
Definition: node.h:450
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:259
u32 flags
Definition: vhost-user.h:77
#define vec_validate_init_empty(V, I, INIT)
Make sure vector is long enough for given index and initialize empty space (no header, unspecified alignment)
Definition: vec.h:483
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:111
int interface_handoff_enable_disable(vlib_main_t *vm, u32 sw_if_index, uword *bitmap, int enable_disable)
Definition: handoff.c:238
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:57
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:972
Definition: defs.h:46
int vnet_feature_enable_disable(const char *arc_name, const char *node_name, u32 sw_if_index, int enable_disable, void *feature_config, u32 n_feature_config_bytes)
Definition: feature.c:233
static uword unformat_check_input(unformat_input_t *i)
Definition: format.h:169
vlib_node_registration_t worker_handoff_node
(constructor) VLIB_REGISTER_NODE (worker_handoff_node)
Definition: handoff.c:221