FD.io VPP  v16.06
Vector Packet Processing
l2_flood.c
/*
 * l2_flood.c : layer 2 flooding
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vlib/cli.h>
#include <vnet/l2/l2_input.h>
#include <vnet/l2/feat_bitmap.h>
#include <vnet/l2/l2_bvi.h>
#include <vnet/replication.h>
#include <vnet/l2/l2_fib.h>

#include <vppinfra/error.h>
#include <vppinfra/hash.h>


/*
 * Flooding uses the packet replication infrastructure to send a copy of the
 * packet to each member interface. Logically the replication infrastructure
 * expects two graph nodes: a prep node that initiates replication and sends the
 * packet to the first destination, and a recycle node that is passed the packet
 * after it has been transmitted.
 *
 * To decrease the amount of code, l2 flooding implements both functions in
 * the same graph node. This node can tell whether it is being called as the
 * "prep" or the "recycle" node using replication_is_recycled().
 */

typedef struct {

  // Next nodes for each feature
  u32 feat_next_node_index[32];

  // next node index for the L3 input node of each ethertype
  next_by_ethertype_t l3_next;

  /* convenience variables */
  vlib_main_t * vlib_main;
  vnet_main_t * vnet_main;
} l2flood_main_t;

typedef struct {
  u8 src[6];
  u8 dst[6];
  u32 sw_if_index;
  u16 bd_index;
} l2flood_trace_t;


/* packet trace format function */
static u8 * format_l2flood_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  l2flood_trace_t * t = va_arg (*args, l2flood_trace_t *);

  s = format (s, "l2-flood: sw_if_index %d dst %U src %U bd_index %d",
              t->sw_if_index,
              format_ethernet_address, t->dst,
              format_ethernet_address, t->src,
              t->bd_index);
  return s;
}
81 
83 
85 
86 #define foreach_l2flood_error \
87 _(L2FLOOD, "L2 flood packets") \
88 _(REPL_FAIL, "L2 replication failures") \
89 _(NO_MEMBERS, "L2 replication complete") \
90 _(BVI_BAD_MAC, "BVI L3 mac mismatch") \
91 _(BVI_ETHERTYPE, "BVI packet with unhandled ethertype")
92 
93 typedef enum {
94 #define _(sym,str) L2FLOOD_ERROR_##sym,
96 #undef _
99 
100 static char * l2flood_error_strings[] = {
101 #define _(sym,string) string,
103 #undef _
104 };
105 
106 typedef enum {
111 
/*
 * Perform flooding on one packet
 *
 * Due to the way BVI processing can modify the packet, the BVI interface
 * (if present) must be processed last in the replication. The member vector
 * is arranged so that the BVI interface is always the first element.
 * Flooding walks the vector in reverse.
 *
 * BVI processing causes the packet to go to L3 processing. This strips the
 * L2 header, which is fine because the replication infrastructure restores
 * it. However L3 processing can trigger larger changes to the packet. For
 * example, an ARP request could be turned into an ARP reply, an ICMP request
 * could be turned into an ICMP reply. If BVI processing is not performed
 * last, the modified packet would be replicated to the remaining members.
 */

static_always_inline void
l2flood_process (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 l2flood_main_t * msm,
                 u64 * counter_base,
                 vlib_buffer_t * b0,
                 u32 * sw_if_index0,
                 l2fib_entry_key_t * key0,
                 u32 * bucket0,
                 l2fib_entry_result_t * result0,
                 u32 * next0)
{
  u16 bd_index0;
  l2_bridge_domain_t *bd_config;
  l2_flood_member_t * members;
  i32 current_member; // signed
  replication_context_t * ctx;
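  // Split-horizon group (SHG) of the input interface. A frame received in a
  // non-zero SHG is never flooded back to members of that same SHG.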
  u8 in_shg = vnet_buffer(b0)->l2.shg;

  if (!replication_is_recycled(b0)) {

    // Do flood "prep node" processing

    // Get config for the bridge domain interface
    bd_index0 = vnet_buffer(b0)->l2.bd_index;
    bd_config = vec_elt_at_index(l2input_main.bd_configs, bd_index0);
    members = bd_config->members;

    // Find first member that passes the reflection and SHG checks
    current_member = vec_len(members) - 1;
    while ((current_member >= 0) &&
           ((members[current_member].sw_if_index == *sw_if_index0) ||
            (in_shg && members[current_member].shg == in_shg))) {
      current_member--;
    }

    if (current_member < 0) {
      // No members to flood to
      *next0 = L2FLOOD_NEXT_DROP;
      b0->error = node->errors[L2FLOOD_ERROR_NO_MEMBERS];
      return;
    }

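    // Replication is only worth starting when at least one member beyond the
    // one selected above will also receive a copy; a single-destination flood
    // bypasses the replication machinery entirely.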
    if ((current_member > 0) &&
        ((current_member > 1) ||
         ((members[0].sw_if_index != *sw_if_index0) &&
          (!in_shg || members[0].shg != in_shg)))) {
      // If more than one member then initiate replication
      ctx = replication_prep (vm, b0, l2flood_node.index, 1 /* l2_packet */);
      ctx->feature_replicas = (uword) members;
      ctx->feature_counter = current_member;
    }

  } else {
    vnet_buffer_opaque_t *vnet_buff_op;

    // Do flood "recycle node" processing

    if (b0->flags & VLIB_BUFFER_REPL_FAIL)
      {
        (void)replication_recycle (vm, b0, 1 /* is_last */);
        *next0 = L2FLOOD_NEXT_DROP;
        b0->error = node->errors[L2FLOOD_ERROR_REPL_FAIL];
        return;
      }

    ctx = replication_get_ctx (b0);
    replication_clear_recycled (b0);

    members = (l2_flood_member_t *)(intptr_t) ctx->feature_replicas;
    current_member = (i32)ctx->feature_counter - 1;

    // Need to update input index from saved packet context
    vnet_buff_op = (vnet_buffer_opaque_t *)ctx->vnet_buffer;
    *sw_if_index0 = vnet_buff_op->sw_if_index[VLIB_RX];

    // Find next member that passes the reflection and SHG check
    while ((current_member >= 0) &&
           ((members[current_member].sw_if_index == *sw_if_index0) ||
            (in_shg && members[current_member].shg == in_shg))) {
      current_member--;
    }

    if (current_member < 0) {
      // No more members to flood to.
      // Terminate replication and drop packet.

      replication_recycle (vm, b0, 1 /* is_last */);

      *next0 = L2FLOOD_NEXT_DROP;
      // Ideally we wouldn't bump a counter here, just silently complete
      b0->error = node->errors[L2FLOOD_ERROR_NO_MEMBERS];
      return;
    }

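    // The final recycle must pass is_last=1: that is when the current member
    // is element 0, or element 1 while element 0 would be filtered out by the
    // same reflection/SHG checks.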
    // Restore packet and context and continue replication
    ctx->feature_counter = current_member;
    replication_recycle (vm, b0,
                         ((current_member == 0) || /* is_last */
                          ((current_member == 1) &&
                           ((members[0].sw_if_index == *sw_if_index0) ||
                            (in_shg && members[0].shg == in_shg)))));
  }

  // Forward packet to the current member

  if (PREDICT_TRUE(members[current_member].flags == L2_FLOOD_MEMBER_NORMAL)) {
    // Do normal L2 forwarding
    vnet_buffer(b0)->sw_if_index[VLIB_TX] = members[current_member].sw_if_index;
    *next0 = L2FLOOD_NEXT_L2_OUTPUT;

  } else {
    // Do BVI processing
    u32 rc;
    rc = l2_to_bvi (vm,
                    msm->vnet_main,
                    b0,
                    members[current_member].sw_if_index,
                    &msm->l3_next,
                    next0);

    if (PREDICT_FALSE(rc)) {
      if (rc == TO_BVI_ERR_BAD_MAC) {
        b0->error = node->errors[L2FLOOD_ERROR_BVI_BAD_MAC];
        *next0 = L2FLOOD_NEXT_DROP;
      } else if (rc == TO_BVI_ERR_ETHERTYPE) {
        b0->error = node->errors[L2FLOOD_ERROR_BVI_ETHERTYPE];
        *next0 = L2FLOOD_NEXT_DROP;
      }
    }
  }

}


static uword
l2flood_node_fn (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  l2flood_next_t next_index;
  l2flood_main_t * msm = &l2flood_main;
  vlib_node_t *n = vlib_get_node (vm, l2flood_node.index);
  u32 node_counter_base_index = n->error_heap_index;
  vlib_error_main_t * em = &vm->error_main;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors; /* number of packets to process */
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      /* get space to enqueue frame to graph node "next_index" */
      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

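      // The dual-packet loop below requires at least 6 packets remaining so
      // that the prefetches of from[2]..from[5] stay within the current frame.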
      while (n_left_from >= 6 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;
          l2fib_entry_key_t key0, key1;
          l2fib_entry_result_t result0, result1;
          u32 bucket0, bucket1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3, * p4, * p5;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);
            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);

            // Prefetch the buffer header for the N+2 loop iteration
            vlib_prefetch_buffer_header (p4, LOAD);
            vlib_prefetch_buffer_header (p5, LOAD);

            // Prefetch the replication context for the N+1 loop iteration
            // This depends on the buffer header above
            replication_prefetch_ctx (p2);
            replication_prefetch_ctx (p3);

            // Prefetch the packet for the N+1 loop iteration
            CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          /* bi is "buffer index", b is pointer to the buffer */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* RX interface handles */
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];

          /* process 2 pkts */
          em->counters[node_counter_base_index + L2FLOOD_ERROR_L2FLOOD] += 2;

          l2flood_process (vm, node, msm, &em->counters[node_counter_base_index],
                           b0, &sw_if_index0, &key0, &bucket0, &result0, &next0);

          l2flood_process (vm, node, msm, &em->counters[node_counter_base_index],
                           b1, &sw_if_index1, &key1, &bucket1, &result1, &next1);

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  l2flood_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
                  ethernet_header_t * h0 = vlib_buffer_get_current (b0);
                  t->sw_if_index = sw_if_index0;
                  t->bd_index = vnet_buffer(b0)->l2.bd_index;
                  clib_memcpy(t->src, h0->src_address, 6);
                  clib_memcpy(t->dst, h0->dst_address, 6);
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  l2flood_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
                  ethernet_header_t * h1 = vlib_buffer_get_current (b1);
                  t->sw_if_index = sw_if_index1;
                  t->bd_index = vnet_buffer(b1)->l2.bd_index;
                  clib_memcpy(t->src, h1->src_address, 6);
                  clib_memcpy(t->dst, h1->dst_address, 6);
                }
            }
366 
367  /* verify speculative enqueues, maybe switch current next frame */
368  /* if next0==next1==next_index then nothing special needs to be done */
369  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
370  to_next, n_left_to_next,
371  bi0, bi1, next0, next1);
372  }
373 
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          u32 sw_if_index0;
          l2fib_entry_key_t key0;
          l2fib_entry_result_t result0;
          u32 bucket0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];

          /* process 1 pkt */
          em->counters[node_counter_base_index + L2FLOOD_ERROR_L2FLOOD] += 1;

          l2flood_process (vm, node, msm, &em->counters[node_counter_base_index],
                           b0, &sw_if_index0, &key0, &bucket0, &result0, &next0);

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE) &&
                            (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              l2flood_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
              ethernet_header_t * h0 = vlib_buffer_get_current (b0);
              t->sw_if_index = sw_if_index0;
              t->bd_index = vnet_buffer(b0)->l2.bd_index;
              clib_memcpy(t->src, h0->src_address, 6);
              clib_memcpy(t->dst, h0->dst_address, 6);
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}


VLIB_REGISTER_NODE (l2flood_node,static) = {
  .function = l2flood_node_fn,
  .name = "l2-flood",
  .vector_size = sizeof (u32),
  .format_trace = format_l2flood_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2flood_error_strings),
  .error_strings = l2flood_error_strings,

  .n_next_nodes = L2FLOOD_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
       [L2FLOOD_NEXT_L2_OUTPUT] = "l2-output",
       [L2FLOOD_NEXT_DROP] = "error-drop",
  },
};
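// Additional next nodes (the per-ethertype L3 input nodes used after BVI
// processing) are added at runtime by l2flood_register_input_type() below.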

clib_error_t *l2flood_init (vlib_main_t *vm)
{
  l2flood_main_t * mp = &l2flood_main;

  mp->vlib_main = vm;
  mp->vnet_main = vnet_get_main();

  // Initialize the feature next-node indexes
  feat_bitmap_init_next_nodes(vm,
                              l2flood_node.index,
                              L2INPUT_N_FEAT,
                              l2input_get_feat_names(),
                              mp->feat_next_node_index);

  return 0;
}

VLIB_INIT_FUNCTION (l2flood_init);


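// The node registered here becomes the next node that l2_to_bvi() selects
// (via mp->l3_next) when a flood copy terminates on the bridge domain's BVI.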
// Add the L3 input node for this ethertype to the next nodes structure
void
l2flood_register_input_type (vlib_main_t * vm,
                             ethernet_type_t type,
                             u32 node_index)
{
  l2flood_main_t * mp = &l2flood_main;
  u32 next_index;

  next_index = vlib_node_add_next (vm,
                                   l2flood_node.index,
                                   node_index);

  next_by_ethertype_register (&mp->l3_next, type, next_index);
}


// set subinterface flood enable/disable
// The CLI format is:
//    set interface l2 flood <interface> [disable]
static clib_error_t *
int_flood (vlib_main_t * vm,
           unformat_input_t * input,
           vlib_cli_command_t * cmd)
{
  vnet_main_t * vnm = vnet_get_main();
  clib_error_t * error = 0;
  u32 sw_if_index;
  u32 enable;

  if (! unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
    {
      error = clib_error_return (0, "unknown interface `%U'",
                                 format_unformat_error, input);
      goto done;
    }

  enable = 1;
  if (unformat (input, "disable")) {
    enable = 0;
  }

  // set the interface flag
  l2input_intf_bitmap_enable(sw_if_index, L2INPUT_FEAT_FLOOD, enable);

 done:
  return error;
}

VLIB_CLI_COMMAND (int_flood_cli, static) = {
  .path = "set interface l2 flood",
  .short_help = "set interface l2 flood <interface> [disable]",
  .function = int_flood,
};
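
// Example invocation (interface name is illustrative):
//   set interface l2 flood GigabitEthernet0/8/0 disable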