FD.io VPP v18.10-34-gcce845e (Vector Packet Processing)
sample plugin: node.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vppinfra/error.h>
#include <sample/sample.h>
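
/*
 * The sample node rewrites each frame it receives: it swaps the Ethernet
 * source and destination MAC addresses and sends the packet back out the
 * interface it arrived on, counting every packet it has swapped.
 */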

typedef struct
{
  u32 next_index;
  u32 sw_if_index;
  u8 new_src_mac[6];
  u8 new_dst_mac[6];
} sample_trace_t;

/* packet trace format function */
static u8 *
format_sample_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  sample_trace_t *t = va_arg (*args, sample_trace_t *);

  s = format (s, "SAMPLE: sw_if_index %d, next index %d\n",
              t->sw_if_index, t->next_index);
  s = format (s, "  new src %U -> new dst %U",
              format_mac_address, t->new_src_mac,
              format_mac_address, t->new_dst_mac);

  return s;
}

extern vlib_node_registration_t sample_node;

#define foreach_sample_error \
_(SWAPPED, "Mac swap packets processed")
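
/*
 * Each _(symbol, string) tuple expands both into an error counter enum
 * value (SAMPLE_ERROR_SWAPPED) and into its display string below, so the
 * counter and its description stay in sync.
 */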

typedef enum
{
#define _(sym,str) SAMPLE_ERROR_##sym,
  foreach_sample_error
#undef _
    SAMPLE_N_ERROR,
} sample_error_t;

static char *sample_error_strings[] = {
#define _(sym,string) string,
  foreach_sample_error
#undef _
};

typedef enum
{
  SAMPLE_NEXT_INTERFACE_OUTPUT,
  SAMPLE_N_NEXT,
} sample_next_t;

/*
 * Simple dual/single loop version, default version which will compile
 * everywhere.
 *
 * Node costs 30 clocks/pkt at a vector size of 51
 */
#define VERSION_1 1

#ifdef VERSION_1
#define foreach_mac_address_offset              \
_(0)                                            \
_(1)                                            \
_(2)                                            \
_(3)                                            \
_(4)                                            \
_(5)
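
/*
 * foreach_mac_address_offset expands _(n) once per MAC address byte
 * offset (0..5); it is used below to unroll the byte-by-byte swap of the
 * source and destination addresses.
 */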

VLIB_NODE_FN (sample_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  sample_next_t next_index;
  u32 pkts_swapped = 0;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 next0 = SAMPLE_NEXT_INTERFACE_OUTPUT;
          u32 next1 = SAMPLE_NEXT_INTERFACE_OUTPUT;
          u32 sw_if_index0, sw_if_index1;
          u8 tmp0[6], tmp1[6];
          ethernet_header_t *en0, *en1;
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          ASSERT (b0->current_data == 0);
          ASSERT (b1->current_data == 0);

          en0 = vlib_buffer_get_current (b0);
          en1 = vlib_buffer_get_current (b1);

          /* This is not the fastest way to swap src + dst mac addresses */
#define _(a) tmp0[a] = en0->src_address[a];
          foreach_mac_address_offset;
#undef _
#define _(a) en0->src_address[a] = en0->dst_address[a];
          foreach_mac_address_offset;
#undef _
#define _(a) en0->dst_address[a] = tmp0[a];
          foreach_mac_address_offset;
#undef _

#define _(a) tmp1[a] = en1->src_address[a];
          foreach_mac_address_offset;
#undef _
#define _(a) en1->src_address[a] = en1->dst_address[a];
          foreach_mac_address_offset;
#undef _
#define _(a) en1->dst_address[a] = tmp1[a];
          foreach_mac_address_offset;
#undef _

          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
          sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];

          /* Send pkt back out the RX interface */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = sw_if_index1;

          pkts_swapped += 2;

          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  sample_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  clib_memcpy (t->new_src_mac, en0->src_address,
                               sizeof (t->new_src_mac));
                  clib_memcpy (t->new_dst_mac, en0->dst_address,
                               sizeof (t->new_dst_mac));
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  sample_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  t->sw_if_index = sw_if_index1;
                  t->next_index = next1;
                  clib_memcpy (t->new_src_mac, en1->src_address,
                               sizeof (t->new_src_mac));
                  clib_memcpy (t->new_dst_mac, en1->dst_address,
                               sizeof (t->new_dst_mac));
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0 = SAMPLE_NEXT_INTERFACE_OUTPUT;
          u32 sw_if_index0;
          u8 tmp0[6];
          ethernet_header_t *en0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          /*
           * Direct from the driver, we should be at offset 0
           * aka at &b0->data[0]
           */
          ASSERT (b0->current_data == 0);

          en0 = vlib_buffer_get_current (b0);

          /* This is not the fastest way to swap src + dst mac addresses */
#define _(a) tmp0[a] = en0->src_address[a];
          foreach_mac_address_offset;
#undef _
#define _(a) en0->src_address[a] = en0->dst_address[a];
          foreach_mac_address_offset;
#undef _
#define _(a) en0->dst_address[a] = tmp0[a];
          foreach_mac_address_offset;
#undef _

          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

          /* Send pkt back out the RX interface */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;

          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                             && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              sample_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sw_if_index = sw_if_index0;
              t->next_index = next0;
              clib_memcpy (t->new_src_mac, en0->src_address,
                           sizeof (t->new_src_mac));
              clib_memcpy (t->new_dst_mac, en0->dst_address,
                           sizeof (t->new_dst_mac));
            }

          pkts_swapped += 1;

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, sample_node.index,
                               SAMPLE_ERROR_SWAPPED, pkts_swapped);
  return frame->n_vectors;
}
#endif

/*
 * This version swaps mac addresses using an MMX vector shuffle
 * Node costs about 17 clocks/pkt at a vector size of 26
 */
#ifdef VERSION_2
VLIB_NODE_FN (sample_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  sample_next_t next_index;
  u32 pkts_swapped = 0;
  /* Vector shuffle mask to swap src, dst */
  u8x16 swapmac = { 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 12, 13, 14, 15 };
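  /*
   * Each mask byte selects an input byte for that output position: the
   * destination field (bytes 0..5) takes the old source (bytes 6..11),
   * the source field (bytes 6..11) takes the old destination (bytes 0..5),
   * and the ethertype plus the first two payload bytes (12..15) stay put.
   */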

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 next0 = SAMPLE_NEXT_INTERFACE_OUTPUT;
          u32 next1 = SAMPLE_NEXT_INTERFACE_OUTPUT;
          u32 sw_if_index0, sw_if_index1;
          u8x16 src_dst0, src_dst1;
          ethernet_header_t *en0, *en1;
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          ASSERT (b0->current_data == 0);
          ASSERT (b1->current_data == 0);

          en0 = vlib_buffer_get_current (b0);
          en1 = vlib_buffer_get_current (b1);

          src_dst0 = ((u8x16 *) en0)[0];
          src_dst1 = ((u8x16 *) en1)[0];
          src_dst0 = u8x16_shuffle (src_dst0, swapmac);
          src_dst1 = u8x16_shuffle (src_dst1, swapmac);
          ((u8x16 *) en0)[0] = src_dst0;
          ((u8x16 *) en1)[0] = src_dst1;

          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
          sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];

          /* Send pkt back out the RX interface */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = sw_if_index1;

          pkts_swapped += 2;

          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  sample_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  clib_memcpy (t->new_src_mac, en0->src_address,
                               sizeof (t->new_src_mac));
                  clib_memcpy (t->new_dst_mac, en0->dst_address,
                               sizeof (t->new_dst_mac));
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  sample_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  t->sw_if_index = sw_if_index1;
                  t->next_index = next1;
                  clib_memcpy (t->new_src_mac, en1->src_address,
                               sizeof (t->new_src_mac));
                  clib_memcpy (t->new_dst_mac, en1->dst_address,
                               sizeof (t->new_dst_mac));
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0 = SAMPLE_NEXT_INTERFACE_OUTPUT;
          u32 sw_if_index0;
          u8x16 src_dst0;
          ethernet_header_t *en0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          /*
           * Direct from the driver, we should be at offset 0
           * aka at &b0->data[0]
           */
          ASSERT (b0->current_data == 0);

          en0 = vlib_buffer_get_current (b0);
          src_dst0 = ((u8x16 *) en0)[0];
          src_dst0 = u8x16_shuffle (src_dst0, swapmac);
          ((u8x16 *) en0)[0] = src_dst0;

          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

          /* Send pkt back out the RX interface */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;

          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                             && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              sample_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sw_if_index = sw_if_index0;
              t->next_index = next0;
              clib_memcpy (t->new_src_mac, en0->src_address,
                           sizeof (t->new_src_mac));
              clib_memcpy (t->new_dst_mac, en0->dst_address,
                           sizeof (t->new_dst_mac));
            }

          pkts_swapped += 1;

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, sample_node.index,
                               SAMPLE_ERROR_SWAPPED, pkts_swapped);
  return frame->n_vectors;
}
#endif

/*
 * This version computes all of the buffer pointers in
 * one motion, uses a quad/single loop model, and
 * traces the entire frame in one motion.
 *
 * Node costs about 16 clocks/pkt at a vector size of 26
 *
 * Some compilation drama with u8x16_shuffle, so turned off by
 * default.
 */

#ifdef VERSION_3

#define u8x16_shuffle __builtin_shuffle
/* This would normally be a stack local, but since it's a constant... */
static const u16 nexts[VLIB_FRAME_SIZE] = { 0 };
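/*
 * Graph arc 0 is SAMPLE_NEXT_INTERFACE_OUTPUT, so a zero-filled nexts[]
 * sends every packet in the frame to interface-output; because the array
 * never changes, one constant copy can be reused for every frame.
 */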

VLIB_NODE_FN (sample_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame)
{
  u32 n_left_from, *from;
  u32 pkts_swapped = 0;
  /* Vector shuffle mask to swap src, dst */
  u8x16 swapmac = { 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 12, 13, 14, 15 };
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  /* See comment below about sending all pkts to the same place... */
  u16 *next __attribute__ ((unused));

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  vlib_get_buffers (vm, from, bufs, n_left_from);
  b = bufs;
  // next = nexts;

  /*
   * We send all pkts to SAMPLE_NEXT_INTERFACE_OUTPUT, aka
   * graph arc 0. So the usual setting of next[0...3] is commented
   * out below
   */

  while (n_left_from >= 4)
    {
      u8x16 src_dst0, src_dst1, src_dst2, src_dst3;
      /* Prefetch next iteration. */
      if (PREDICT_TRUE (n_left_from >= 8))
        {
          vlib_prefetch_buffer_header (b[4], STORE);
          vlib_prefetch_buffer_header (b[5], STORE);
          vlib_prefetch_buffer_header (b[6], STORE);
          vlib_prefetch_buffer_header (b[7], STORE);
          CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, STORE);
        }

      src_dst0 = ((u8x16 *) vlib_buffer_get_current (b[0]))[0];
      src_dst1 = ((u8x16 *) vlib_buffer_get_current (b[1]))[0];
      src_dst2 = ((u8x16 *) vlib_buffer_get_current (b[2]))[0];
      src_dst3 = ((u8x16 *) vlib_buffer_get_current (b[3]))[0];

      src_dst0 = u8x16_shuffle (src_dst0, swapmac);
      src_dst1 = u8x16_shuffle (src_dst1, swapmac);
      src_dst2 = u8x16_shuffle (src_dst2, swapmac);
      src_dst3 = u8x16_shuffle (src_dst3, swapmac);

      ((u8x16 *) vlib_buffer_get_current (b[0]))[0] = src_dst0;
      ((u8x16 *) vlib_buffer_get_current (b[1]))[0] = src_dst1;
      ((u8x16 *) vlib_buffer_get_current (b[2]))[0] = src_dst2;
      ((u8x16 *) vlib_buffer_get_current (b[3]))[0] = src_dst3;

      vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
        vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      vnet_buffer (b[1])->sw_if_index[VLIB_TX] =
        vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      vnet_buffer (b[2])->sw_if_index[VLIB_TX] =
        vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      vnet_buffer (b[3])->sw_if_index[VLIB_TX] =
        vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      // next[0] = SAMPLE_NEXT_INTERFACE_OUTPUT;
      // next[1] = SAMPLE_NEXT_INTERFACE_OUTPUT;
      // next[2] = SAMPLE_NEXT_INTERFACE_OUTPUT;
      // next[3] = SAMPLE_NEXT_INTERFACE_OUTPUT;

      b += 4;
      // next += 4;
      n_left_from -= 4;
      pkts_swapped += 4;
    }

  while (n_left_from > 0)
    {
      u8x16 src_dst0;
      src_dst0 = ((u8x16 *) vlib_buffer_get_current (b[0]))[0];
      src_dst0 = u8x16_shuffle (src_dst0, swapmac);
      ((u8x16 *) vlib_buffer_get_current (b[0]))[0] = src_dst0;
      vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
        vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      // next[0] = SAMPLE_NEXT_INTERFACE_OUTPUT;

      b += 1;
      // next += 1;
      n_left_from -= 1;
      pkts_swapped += 1;

    }
  vlib_buffer_enqueue_to_next (vm, node, from, (u16 *) nexts,
                               frame->n_vectors);

  vlib_node_increment_counter (vm, sample_node.index,
                               SAMPLE_ERROR_SWAPPED, pkts_swapped);

  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    {
      int i;
      b = bufs;

      for (i = 0; i < frame->n_vectors; i++)
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              ethernet_header_t *en;
              sample_trace_t *t =
                vlib_add_trace (vm, node, b[0], sizeof (*t));
              t->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              t->next_index = SAMPLE_NEXT_INTERFACE_OUTPUT;
              en = vlib_buffer_get_current (b[0]);
              clib_memcpy (t->new_src_mac, en->src_address,
                           sizeof (t->new_src_mac));
              clib_memcpy (t->new_dst_mac, en->dst_address,
                           sizeof (t->new_dst_mac));
              b++;
            }
          else
            break;
        }
    }
  return frame->n_vectors;
}
#endif

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (sample_node) =
{
  .name = "sample",
  .vector_size = sizeof (u32),
  .format_trace = format_sample_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(sample_error_strings),
  .error_strings = sample_error_strings,

  .n_next_nodes = SAMPLE_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
    [SAMPLE_NEXT_INTERFACE_OUTPUT] = "interface-output",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */