FD.io VPP v17.01.1-3-gc6833f8
Vector Packet Processing
l2_rw.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <vlib/vlib.h>
17 #include <vnet/l2/feat_bitmap.h>
18 #include <vnet/l2/l2_rw.h>
19 
20 /**
21  * @file
22  * @brief Layer 2 Rewrite.
23  *
24  * The Layer 2 Rewrite node uses classify tables to match packets. Then,
25  * using the provisioned mask and value, it modifies the packet header.
26  */
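/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a matched packet region is rewritten vector by vector as
 *
 *   new = (old & ~mask) | value
 *
 * so, viewed one byte at a time, old 0x11 with mask 0xff and value 0xaa
 * becomes 0xaa, while old 0x11 with mask 0x00 is left untouched. The value
 * is pre-masked when the entry is provisioned (see l2_rw_mod_entry below).
 */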
27 
28 
29 l2_rw_main_t l2_rw_main;
30 
31 vlib_node_registration_t l2_rw_node;
32 
33 typedef struct
34 {
35  u32 sw_if_index;
36  u32 classify_table_index;
37  u32 rewrite_entry_index;
38 } l2_rw_trace_t;
39 
40 static u8 *
41 format_l2_rw_entry (u8 * s, va_list * args)
42 {
43  l2_rw_entry_t *e = va_arg (*args, l2_rw_entry_t *);
44  l2_rw_main_t *rw = &l2_rw_main;
45  s = format (s, "%d - mask:%U value:%U\n",
46  e - rw->entries,
47  format_hex_bytes, e->mask,
48  e->rewrite_n_vectors * sizeof (u32x4), format_hex_bytes,
49  e->value, e->rewrite_n_vectors * sizeof (u32x4));
50  s =
51  format (s, " hits:%d skip_bytes:%d", e->hit_count,
52  e->skip_n_vectors * sizeof (u32x4));
53  return s;
54 }
55 
56 static u8 *
57 format_l2_rw_config (u8 * s, va_list * args)
58 {
59  l2_rw_config_t *c = va_arg (*args, l2_rw_config_t *);
60  return format (s, "table-index:%d miss-index:%d",
61  c->table_index, c->miss_index);
62 }
63 
64 /* packet trace format function */
65 static u8 *
66 format_l2_rw_trace (u8 * s, va_list * args)
67 {
68  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
69  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
70  l2_rw_trace_t *t = va_arg (*args, l2_rw_trace_t *);
71  return format (s, "l2-rw: sw_if_index %d, table %d, entry %d",
72  t->sw_if_index, t->classify_table_index,
73  t->rewrite_entry_index);
74 }
75 
76 always_inline l2_rw_config_t *
77 l2_rw_get_config (u32 sw_if_index)
78 {
79  l2_rw_main_t *rw = &l2_rw_main;
80  if (PREDICT_FALSE (!clib_bitmap_get (rw->configs_bitmap, sw_if_index)))
81  {
82  vec_validate (rw->configs, sw_if_index);
83  rw->configs[sw_if_index].table_index = ~0;
84  rw->configs[sw_if_index].miss_index = ~0;
85  rw->configs_bitmap =
86  clib_bitmap_set (rw->configs_bitmap, sw_if_index, 1);
87  }
88  return &rw->configs[sw_if_index];
89 }
90 
91 static_always_inline void
92 l2_rw_rewrite (l2_rw_entry_t * rwe, u8 * h)
93 {
94  if (U32X4_ALIGNED (h))
95  {
96  u32x4 *d = ((u32x4 *) h) + rwe->skip_n_vectors;
97  switch (rwe->rewrite_n_vectors)
98  {
99  case 5:
100  d[4] = (d[4] & ~rwe->mask[4]) | rwe->value[4];
101  /* FALLTHROUGH */
102  case 4:
103  d[3] = (d[3] & ~rwe->mask[3]) | rwe->value[3];
104  /* FALLTHROUGH */
105  case 3:
106  d[2] = (d[2] & ~rwe->mask[2]) | rwe->value[2];
107  /* FALLTHROUGH */
108  case 2:
109  d[1] = (d[1] & ~rwe->mask[1]) | rwe->value[1];
110  /* FALLTHROUGH */
111  case 1:
112  d[0] = (d[0] & ~rwe->mask[0]) | rwe->value[0];
113  break;
114  default:
115  abort ();
116  }
117  }
118  else
119  {
120  u64 *d = ((u64 *) h) + rwe->skip_n_vectors * 2;
121  switch (rwe->rewrite_n_vectors)
122  {
123  case 5:
124  d[8] =
125  (d[8] & ~(((u64 *) rwe->mask)[8])) | (((u64 *) rwe->value)[8]);
126  d[9] =
127  (d[9] & ~(((u64 *) rwe->mask)[9])) | (((u64 *) rwe->value)[9]);
128  /* FALLTHROUGH */
129  case 4:
130  d[6] =
131  (d[6] & ~(((u64 *) rwe->mask)[6])) | (((u64 *) rwe->value)[6]);
132  d[7] =
133  (d[7] & ~(((u64 *) rwe->mask)[7])) | (((u64 *) rwe->value)[7]);
134  /* FALLTHROUGH */
135  case 3:
136  d[4] =
137  (d[4] & ~(((u64 *) rwe->mask)[4])) | (((u64 *) rwe->value)[4]);
138  d[5] =
139  (d[5] & ~(((u64 *) rwe->mask)[5])) | (((u64 *) rwe->value)[5]);
140  /* FALLTHROUGH */
141  case 2:
142  d[2] =
143  (d[2] & ~(((u64 *) rwe->mask)[2])) | (((u64 *) rwe->value)[2]);
144  d[3] =
145  (d[3] & ~(((u64 *) rwe->mask)[3])) | (((u64 *) rwe->value)[3]);
146  /* FALLTHROUGH */
147  case 1:
148  d[0] =
149  (d[0] & ~(((u64 *) rwe->mask)[0])) | (((u64 *) rwe->value)[0]);
150  d[1] =
151  (d[1] & ~(((u64 *) rwe->mask)[1])) | (((u64 *) rwe->value)[1]);
152  break;
153  default:
154  abort ();
155  }
156  }
157 }
158 
159 static uword
160 l2_rw_node_fn (vlib_main_t * vm,
161  vlib_node_runtime_t * node, vlib_frame_t * frame)
162 {
163  l2_rw_main_t *rw = &l2_rw_main;
164  u32 n_left_from, *from, *to_next, next_index;
165  vnet_classify_main_t *vcm = &vnet_classify_main;
166  f64 now = vlib_time_now (vlib_get_main ());
167  u32 prefetch_size = 0;
168 
169  from = vlib_frame_vector_args (frame);
170  n_left_from = frame->n_vectors; /* number of packets to process */
171  next_index = node->cached_next_index;
172 
173  while (n_left_from > 0)
174  {
175  u32 n_left_to_next;
176 
177  /* get space to enqueue frame to graph node "next_index" */
178  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
179 
180  while (n_left_from >= 4 && n_left_to_next >= 2)
181  {
182  u32 bi0, next0, sw_if_index0, feature_bitmap0, rwe_index0;
183  u32 bi1, next1, sw_if_index1, feature_bitmap1, rwe_index1;
184  vlib_buffer_t *b0, *b1;
185  ethernet_header_t *h0, *h1;
186  l2_rw_config_t *config0, *config1;
187  u64 hash0, hash1;
188  vnet_classify_table_t *t0, *t1;
189  vnet_classify_entry_t *e0, *e1;
190  l2_rw_entry_t *rwe0, *rwe1;
191 
192  {
193  vlib_buffer_t *p2, *p3;
194  p2 = vlib_get_buffer (vm, from[2]);
195  p3 = vlib_get_buffer (vm, from[3]);
196 
197  vlib_prefetch_buffer_header (p2, LOAD);
198  vlib_prefetch_buffer_header (p3, LOAD);
199  CLIB_PREFETCH (vlib_buffer_get_current (p2), prefetch_size, LOAD);
200  CLIB_PREFETCH (vlib_buffer_get_current (p3), prefetch_size, LOAD);
201  }
202 
203  bi0 = from[0];
204  bi1 = from[1];
205  to_next[0] = bi0;
206  to_next[1] = bi1;
207  from += 2;
208  to_next += 2;
209  n_left_from -= 2;
210  n_left_to_next -= 2;
211 
212  b0 = vlib_get_buffer (vm, bi0);
213  b1 = vlib_get_buffer (vm, bi1);
214  h0 = vlib_buffer_get_current (b0);
215  h1 = vlib_buffer_get_current (b1);
216 
217  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
218  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
219  config0 = l2_rw_get_config (sw_if_index0); /*TODO: check sw_if_index0 value */
220  config1 = l2_rw_get_config (sw_if_index1); /*TODO: check sw_if_index1 value */
221  t0 = pool_elt_at_index (vcm->tables, config0->table_index);
222  t1 = pool_elt_at_index (vcm->tables, config1->table_index);
223  prefetch_size =
224  (t1->skip_n_vectors + t1->match_n_vectors) * sizeof (u32x4);
225 
226  hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
227  hash1 = vnet_classify_hash_packet (t1, (u8 *) h1);
228  e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
229  e1 = vnet_classify_find_entry (t1, (u8 *) h1, hash1, now);
230 
231  while (!e0 && (t0->next_table_index != ~0))
232  {
233  t0 = pool_elt_at_index (vcm->tables, t0->next_table_index);
234  hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
235  e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
236  }
237 
238  while (!e1 && (t1->next_table_index != ~0))
239  {
240  t1 = pool_elt_at_index (vcm->tables, t1->next_table_index);
241  hash1 = vnet_classify_hash_packet (t1, (u8 *) h1);
242  e1 = vnet_classify_find_entry (t1, (u8 *) h1, hash1, now);
243  }
244 
245  rwe_index0 = e0 ? e0->opaque_index : config0->miss_index;
246  rwe_index1 = e1 ? e1->opaque_index : config1->miss_index;
247 
248  if (rwe_index0 != ~0)
249  {
250  rwe0 = pool_elt_at_index (rw->entries, rwe_index0);
251  l2_rw_rewrite (rwe0, (u8 *) h0);
252  }
253  if (rwe_index1 != ~0)
254  {
255  rwe1 = pool_elt_at_index (rw->entries, rwe_index1);
256  l2_rw_rewrite (rwe1, (u8 *) h1);
257  }
258 
259  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
260  {
261  l2_rw_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
262  t->sw_if_index = sw_if_index0;
263  t->classify_table_index = config0->table_index;
264  t->rewrite_entry_index = rwe_index0;
265  }
266 
267  if (PREDICT_FALSE ((b1->flags & VLIB_BUFFER_IS_TRACED)))
268  {
269  l2_rw_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
270  t->sw_if_index = sw_if_index1;
271  t->classify_table_index = config1->table_index;
272  t->rewrite_entry_index = rwe_index1;
273  }
274 
275  /* Update feature bitmap and get next feature index */
276  feature_bitmap0 =
277  vnet_buffer (b0)->l2.feature_bitmap & ~L2INPUT_FEAT_RW;
278  feature_bitmap1 =
279  vnet_buffer (b1)->l2.feature_bitmap & ~L2INPUT_FEAT_RW;
280  vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap0;
281  vnet_buffer (b1)->l2.feature_bitmap = feature_bitmap1;
282  next0 = feat_bitmap_get_next_node_index (rw->feat_next_node_index,
283  feature_bitmap0);
284  next1 = feat_bitmap_get_next_node_index (rw->feat_next_node_index,
285  feature_bitmap1);
286 
287  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
288  to_next, n_left_to_next,
289  bi0, bi1, next0, next1);
290  }
291 
292  while (n_left_from > 0 && n_left_to_next > 0)
293  {
294  u32 bi0, next0, sw_if_index0, feature_bitmap0, rwe_index0;
295  vlib_buffer_t *b0;
296  ethernet_header_t *h0;
297  l2_rw_config_t *config0;
298  u64 hash0;
299  vnet_classify_table_t *t0;
300  vnet_classify_entry_t *e0;
301  l2_rw_entry_t *rwe0;
302 
303  bi0 = from[0];
304  to_next[0] = bi0;
305  from += 1;
306  to_next += 1;
307  n_left_from -= 1;
308  n_left_to_next -= 1;
309 
310  b0 = vlib_get_buffer (vm, bi0);
311  h0 = vlib_buffer_get_current (b0);
312 
313  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
314  config0 = l2_rw_get_config (sw_if_index0); /*TODO: check sw_if_index0 value */
315  t0 = pool_elt_at_index (vcm->tables, config0->table_index);
316 
317  hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
318  e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
319 
320  while (!e0 && (t0->next_table_index != ~0))
321  {
322  t0 = pool_elt_at_index (vcm->tables, t0->next_table_index);
323  hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
324  e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
325  }
326 
327  rwe_index0 = e0 ? e0->opaque_index : config0->miss_index;
328 
329  if (rwe_index0 != ~0)
330  {
331  rwe0 = pool_elt_at_index (rw->entries, rwe_index0);
332  l2_rw_rewrite (rwe0, (u8 *) h0);
333  }
334 
335  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
336  {
337  l2_rw_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
338  t->sw_if_index = sw_if_index0;
339  t->classify_table_index = config0->table_index;
340  t->rewrite_entry_index = rwe_index0;
341  }
342 
343  /* Update feature bitmap and get next feature index */
344  feature_bitmap0 =
345  vnet_buffer (b0)->l2.feature_bitmap & ~L2INPUT_FEAT_RW;
346  vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap0;
347  next0 = feat_bitmap_get_next_node_index (rw->feat_next_node_index,
348  feature_bitmap0);
349 
350  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
351  to_next, n_left_to_next,
352  bi0, next0);
353  }
354  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
355  }
356 
357  return frame->n_vectors;
358 }
359 
360 int
361 l2_rw_mod_entry (u32 * index,
362  u8 * mask, u8 * value, u32 len, u32 skip, u8 is_del)
363 {
364  l2_rw_main_t *rw = &l2_rw_main;
365  l2_rw_entry_t *e = 0;
366  if (*index != ~0)
367  {
368  if (pool_is_free_index (rw->entries, *index))
369  {
370  return -1;
371  }
372  e = pool_elt_at_index (rw->entries, *index);
373  }
374  else
375  {
376  pool_get (rw->entries, e);
377  *index = e - rw->entries;
378  }
379 
380  if (!e)
381  return -1;
382 
383  if (is_del)
384  {
385  pool_put (rw->entries, e);
386  return 0;
387  }
388 
389  e->skip_n_vectors = skip / sizeof (u32x4);
390  skip -= e->skip_n_vectors * sizeof (u32x4);
391  e->rewrite_n_vectors = (skip + len - 1) / sizeof (u32x4) + 1;
392  vec_alloc_aligned (e->mask, e->rewrite_n_vectors, sizeof (u32x4));
393  memset (e->mask, 0, e->rewrite_n_vectors * sizeof (u32x4));
394  vec_alloc_aligned (e->value, e->rewrite_n_vectors, sizeof (u32x4));
395  memset (e->value, 0, e->rewrite_n_vectors * sizeof (u32x4));
396 
397  clib_memcpy (((u8 *) e->value) + skip, value, len);
398  clib_memcpy (((u8 *) e->mask) + skip, mask, len);
399 
400  int i;
401  for (i = 0; i < e->rewrite_n_vectors; i++)
402  {
403  e->value[i] &= e->mask[i];
404  }
405 
406  return 0;
407 }
408 
409 static clib_error_t *
410 l2_rw_entry_cli_fn (vlib_main_t * vm,
411  unformat_input_t * input, vlib_cli_command_t * cmd)
412 {
413  u32 index = ~0;
414  u8 *mask = 0;
415  u8 *value = 0;
416  u32 skip = 0;
417  u8 del = 0;
418 
419  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
420  {
421  if (unformat (input, "index %d", &index))
422  ;
423  else if (unformat (input, "mask %U", unformat_hex_string, &mask))
424  ;
425  else if (unformat (input, "value %U", unformat_hex_string, &value))
426  ;
427  else if (unformat (input, "skip %d", &skip))
428  ;
429  else if (unformat (input, "del"))
430  del = 1;
431  else
432  break;
433  }
434 
435  if (!mask || !value)
436  return clib_error_return (0, "Unspecified mask or value");
437 
438  if (vec_len (mask) != vec_len (value))
439  return clib_error_return (0, "Mask and value lengths must be identical");
440 
441  int ret;
442  if ((ret =
443  l2_rw_mod_entry (&index, mask, value, vec_len (mask), skip, del)))
444  return clib_error_return (0, "Could not add entry");
445 
446  return 0;
447 }
448 
449 /*?
450  * The Layer 2 Rewrite node uses classify tables to match packets. Then,
451  * using the provisioned mask and value, it modifies the packet header.
452  *
453  * @cliexpar
454  * @todo This is incomplete. This needs a detailed description and a
455  * practical example.
456 ?*/
457 /* *INDENT-OFF* */
458 VLIB_CLI_COMMAND (l2_rw_entry_cli, static) = {
459  .path = "l2 rewrite entry",
460  .short_help =
461  "l2 rewrite entry [index <index>] [mask <hex-mask>] [value <hex-value>] [skip <n_bytes>] [del]",
462  .function = l2_rw_entry_cli_fn,
463 };
464 /* *INDENT-ON* */
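/*
 * Hypothetical usage sketch (editorial addition), following the short help
 * above; the hex strings, skip offset and index are illustrative only:
 *
 *   l2 rewrite entry mask 0000000000000000ffffffffffff value 0000000000000000aabbccddeeff skip 0
 *   l2 rewrite entry index 0 del
 */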
465 
466 int
467 l2_rw_interface_set_table (u32 sw_if_index, u32 table_index, u32 miss_index)
468 {
469  l2_rw_config_t *c = l2_rw_get_config (sw_if_index);
470  l2_rw_main_t *rw = &l2_rw_main;
471 
472  c->table_index = table_index;
473  c->miss_index = miss_index;
474  u32 feature_bitmap = (table_index == ~0) ? 0 : L2INPUT_FEAT_RW;
475 
476  l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_RW, feature_bitmap);
477 
478  if (c->table_index == ~0)
479  clib_bitmap_set (rw->configs_bitmap, sw_if_index, 0);
480 
481  return 0;
482 }
483 
484 static clib_error_t *
485 l2_rw_interface_cli_fn (vlib_main_t * vm,
486  unformat_input_t * input, vlib_cli_command_t * cmd)
487 {
488  vnet_main_t *vnm = vnet_get_main ();
489  u32 table_index = ~0;
490  u32 sw_if_index = ~0;
491  u32 miss_index = ~0;
492 
493  if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
494  {
495  unformat (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index);
496  }
497 
498  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
499  {
500  if (unformat (input, "table %d", &table_index))
501  ;
502  else if (unformat (input, "miss-index %d", &miss_index))
503  ;
504  else
505  break;
506  }
507 
508  if (sw_if_index == ~0)
509  return clib_error_return (0,
510  "You must specify an interface 'iface <interface>'",
511  format_unformat_error, input);
512  int ret;
513  if ((ret =
514  l2_rw_interface_set_table (sw_if_index, table_index, miss_index)))
515  return clib_error_return (0, "l2_rw_interface_set_table returned %d",
516  ret);
517 
518  return 0;
519 }
520 
521 /*?
522  * The Layer 2 Rewrite node uses classify tables to match packets. Then,
523  * using the provisioned mask and value, it modifies the packet header.
524  *
525  * @cliexpar
526  * @todo This is incomplete. This needs a detailed description and a
527  * practical example.
528 ?*/
529 /* *INDENT-OFF* */
530 VLIB_CLI_COMMAND (l2_rw_interface_cli, static) = {
531  .path = "set interface l2 rewrite",
532  .short_help =
533  "set interface l2 rewrite <interface> [table <table index>] [miss-index <entry-index>]",
534  .function = l2_rw_interface_cli_fn,
535 };
536 /* *INDENT-ON* */
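/*
 * Hypothetical usage sketch (editorial addition), following the short help
 * above; the interface name and the table/miss indices are placeholders:
 *
 *   set interface l2 rewrite GigabitEthernet0/8/0 table 5 miss-index 0
 */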
537 
538 static clib_error_t *
539 l2_rw_show_interfaces_cli_fn (vlib_main_t * vm,
540  unformat_input_t * input,
541  vlib_cli_command_t * cmd)
542 {
543  l2_rw_main_t *rw = &l2_rw_main;
544  if (clib_bitmap_count_set_bits (rw->configs_bitmap) == 0)
545  vlib_cli_output (vm, "No interface is currently using l2 rewrite\n");
546 
547  uword i;
548  /* *INDENT-OFF* */
549  clib_bitmap_foreach(i, rw->configs_bitmap, {
550  vlib_cli_output (vm, "sw_if_index:%d %U\n", i, format_l2_rw_config, &rw->configs[i]);
551  });
552  /* *INDENT-ON* */
553  return 0;
554 }
555 
556 /*?
557  * The Layer 2 Rewrite node uses classify tables to match packets. Then,
558  * using the provisioned mask and value, it modifies the packet header.
559  *
560  * @cliexpar
561  * @todo This is incomplete. This needs a detailed description and a
562  * practical example.
563 ?*/
564 /* *INDENT-OFF* */
565 VLIB_CLI_COMMAND (l2_rw_show_interfaces_cli, static) = {
566  .path = "show l2 rewrite interfaces",
567  .short_help =
568  "show l2 rewrite interfaces",
569  .function = l2_rw_show_interfaces_cli_fn,
570 };
571 /* *INDENT-ON* */
572 
573 static clib_error_t *
574 l2_rw_show_entries_cli_fn (vlib_main_t * vm,
575  unformat_input_t * input, vlib_cli_command_t * cmd)
576 {
577  l2_rw_main_t *rw = &l2_rw_main;
578  l2_rw_entry_t *e;
579  if (pool_elts (rw->entries) == 0)
580  vlib_cli_output (vm, "No entries\n");
581 
582  /* *INDENT-OFF* */
583  pool_foreach(e, rw->entries, {
584  vlib_cli_output (vm, "%U\n", format_l2_rw_entry, e);
585  });
586  /* *INDENT-ON* */
587  return 0;
588 }
589 
590 /*?
591  * The Layer 2 Rewrite node uses classify tables to match packets. Then,
592  * using the provisioned mask and value, it modifies the packet header.
593  *
594  * @cliexpar
595  * @todo This is incomplete. This needs a detailed description and a
596  * practical example.
597 ?*/
598 /* *INDENT-OFF* */
599 VLIB_CLI_COMMAND (l2_rw_show_entries_cli, static) = {
600  .path = "show l2 rewrite entries",
601  .short_help =
602  "show l2 rewrite entries",
603  .function = l2_rw_show_entries_cli_fn,
604 };
605 /* *INDENT-ON* */
606 
607 int
608 l2_rw_enable_disable (u32 bridge_domain, u8 disable)
609 {
610  u32 mask = L2INPUT_FEAT_RW;
611  l2input_set_bridge_features (bridge_domain, mask, disable ? 0 : mask);
612  return 0;
613 }
614 
615 static clib_error_t *
616 l2_rw_set_cli_fn (vlib_main_t * vm,
617  unformat_input_t * input, vlib_cli_command_t * cmd)
618 {
619  u32 bridge_domain;
620  u8 disable = 0;
621 
622  if (unformat_check_input (input) == UNFORMAT_END_OF_INPUT ||
623  !unformat (input, "%d", &bridge_domain))
624  {
625  return clib_error_return (0, "You must specify a bridge domain");
626  }
627 
628  if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT &&
629  unformat (input, "disable"))
630  {
631  disable = 1;
632  }
633 
634  if (l2_rw_enable_disable (bridge_domain, disable))
635  return clib_error_return (0, "Could not enable or disable rewrite");
636 
637  return 0;
638 }
639 
640 /*?
641  * The Layer 2 Rewrite node uses classify tables to match packets. Then,
642  * using the provisioned mask and value, it modifies the packet header.
643  *
644  * @cliexpar
645  * @todo This is incomplete. This needs a detailed description and a
646  * practical example.
647 ?*/
648 /* *INDENT-OFF* */
649 VLIB_CLI_COMMAND (l2_rw_set_cli, static) = {
650  .path = "set bridge-domain rewrite",
651  .short_help =
652  "set bridge-domain rewrite <bridge-domain> [disable]",
653  .function = l2_rw_set_cli_fn,
654 };
655 /* *INDENT-ON* */
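/*
 * Hypothetical usage sketch (editorial addition), following the short help
 * above; bridge-domain 200 is a placeholder:
 *
 *   set bridge-domain rewrite 200
 *   set bridge-domain rewrite 200 disable
 */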
656 
657 static clib_error_t *
658 l2_rw_init (vlib_main_t * vm)
659 {
660  l2_rw_main_t *rw = &l2_rw_main;
661  rw->configs = 0;
662  rw->entries = 0;
663  clib_bitmap_alloc (rw->configs_bitmap, 1);
664  feat_bitmap_init_next_nodes (vm,
665  l2_rw_node.index,
666  L2INPUT_N_FEAT,
667  l2input_get_feat_names (),
668  rw->feat_next_node_index);
669  return 0;
670 }
671 
672 VLIB_INIT_FUNCTION (l2_rw_init);
673 
674 enum
675 {
676  L2_RW_NEXT_DROP,
677  L2_RW_N_NEXT,
678 };
679 
680 #define foreach_l2_rw_error \
681 _(UNKNOWN, "Unknown error")
682 
683 typedef enum
684 {
685 #define _(sym,str) L2_RW_ERROR_##sym,
686  foreach_l2_rw_error
687 #undef _
688  L2_RW_N_ERROR,
689 } l2_rw_error_t;
690 
691 static char *l2_rw_error_strings[] = {
692 #define _(sym,string) string,
693  foreach_l2_rw_error
694 #undef _
695 };
696 
697 /* *INDENT-OFF* */
698 VLIB_REGISTER_NODE (l2_rw_node) = {
699  .function = l2_rw_node_fn,
700  .name = "l2-rw",
701  .vector_size = sizeof (u32),
702  .format_trace = format_l2_rw_trace,
703  .type = VLIB_NODE_TYPE_INTERNAL,
704  .n_errors = ARRAY_LEN(l2_rw_error_strings),
705  .error_strings = l2_rw_error_strings,
706  .runtime_data_bytes = 0,
707  .n_next_nodes = L2_RW_N_NEXT,
708  .next_nodes = { [L2_RW_NEXT_DROP] = "error-drop"},
709 };
710 /* *INDENT-ON* */
711 
712 VLIB_NODE_FUNCTION_MULTIARCH (l2_rw_node, l2_rw_node_fn)
713 /*
714  * fd.io coding-style-patch-verification: ON
715  *
716  * Local Variables:
717  * eval: (c-set-style "gnu")
718  * End:
719  */