FD.io VPP  v20.09-64-g4f7b92f0a
Vector Packet Processing
gbp_classify_node.c
/*
 * gbp.h : Group Based Policy
 *
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <plugins/gbp/gbp.h>
#include <plugins/gbp/gbp_classify.h>
#include <plugins/gbp/gbp_policy_dpo.h>
#include <plugins/gbp/gbp_ext_itf.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/l2/l2_input.h>
#include <vnet/l2/feat_bitmap.h>
#include <vnet/fib/fib_table.h>
#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>

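/*
 * GBP source classification nodes.
 *
 * Two families of nodes are implemented here:
 *  - port based classification (gbp-null-classify, gbp-src-classify,
 *    ip4/ip6-gbp-src-classify): the source class (sclass) is taken from
 *    the endpoint attached to the RX interface.
 *  - LPM based classification (*-gbp-lpm-classify): the sclass is derived
 *    from a longest-prefix-match on the packet's source address.
 * The L2 feature next-node tables used below (gscm->l2_input_feat_next)
 * are part of gbp_src_classify_main, defined in gbp_classify.c.
 */
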
/**
 * per-packet trace data
 */
typedef struct gbp_classify_trace_t_
{
  /* per-pkt trace data */
  sclass_t sclass;
} gbp_classify_trace_t;

/*
 * determine the SRC EPG from the input port
 */
always_inline uword
gbp_classify_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * frame,
                     gbp_src_classify_type_t type, dpo_proto_t dproto)
{
  gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
  u32 n_left_from, *from, *to_next;
  u32 next_index;

  next_index = 0;
  n_left_from = frame->n_vectors;
  from = vlib_frame_vector_args (frame);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 next0, bi0, sw_if_index0;
          const gbp_endpoint_t *ge0;
          vlib_buffer_t *b0;
          sclass_t sclass0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
          vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;

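          /*
           * classification strategy depends on the node instance:
           *  - NULL classify: mark the packet with an invalid sclass
           *  - L2 port classify: find the endpoint from the source MAC
           *  - IP4/IP6 classify: find the endpoint from the source address
           *    in the RX interface's FIB
           */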
          if (GBP_SRC_CLASSIFY_NULL == type)
            {
              sclass0 = SCLASS_INVALID;
              next0 =
                vnet_l2_feature_next (b0,
                                      gscm->l2_input_feat_next
                                      [GBP_SRC_CLASSIFY_NULL],
                                      L2INPUT_FEAT_GBP_NULL_CLASSIFY);
            }
          else
            {
              if (DPO_PROTO_ETHERNET == dproto)
                {
                  const ethernet_header_t *h0;

                  h0 = vlib_buffer_get_current (b0);
                  next0 =
                    vnet_l2_feature_next (b0,
                                          gscm->l2_input_feat_next
                                          [GBP_SRC_CLASSIFY_PORT],
                                          L2INPUT_FEAT_GBP_SRC_CLASSIFY);
                  ge0 = gbp_endpoint_find_mac (h0->src_address,
                                               vnet_buffer (b0)->l2.bd_index);
                }
              else if (DPO_PROTO_IP4 == dproto)
                {
                  const ip4_header_t *h0;

                  h0 = vlib_buffer_get_current (b0);

                  ge0 = gbp_endpoint_find_ip4
                    (&h0->src_address,
                     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
                                                          sw_if_index0));

                  /*
                   * Go straight to lookup, do not pass go, do not collect $200
                   */
                  next0 = 0;
                }
              else if (DPO_PROTO_IP6 == dproto)
                {
                  const ip6_header_t *h0;

                  h0 = vlib_buffer_get_current (b0);

                  ge0 = gbp_endpoint_find_ip6
                    (&h0->src_address,
                     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
                                                          sw_if_index0));

                  /*
                   * Go straight to lookup, do not pass go, do not collect $200
                   */
                  next0 = 0;
                }
              else
                {
                  ge0 = NULL;
                  next0 = 0;
                  ASSERT (0);
                }

              if (PREDICT_TRUE (NULL != ge0))
                sclass0 = ge0->ge_fwd.gef_sclass;
              else
                sclass0 = SCLASS_INVALID;
            }

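          /*
           * stash the sclass in the buffer's GBP metadata where the
           * downstream GBP policy nodes will find it
           */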
          vnet_buffer2 (b0)->gbp.sclass = sclass0;

          if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              gbp_classify_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sclass = sclass0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_NODE_FN (gbp_src_classify_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * frame)
{
  return (gbp_classify_inline (vm, node, frame,
                               GBP_SRC_CLASSIFY_PORT, DPO_PROTO_ETHERNET));
}

VLIB_NODE_FN (gbp_null_classify_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * frame)
{
  return (gbp_classify_inline (vm, node, frame,
                               GBP_SRC_CLASSIFY_NULL, DPO_PROTO_ETHERNET));
}

VLIB_NODE_FN (gbp_ip4_src_classify_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * frame)
{
  return (gbp_classify_inline (vm, node, frame,
                               GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP4));
}

VLIB_NODE_FN (gbp_ip6_src_classify_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * frame)
{
  return (gbp_classify_inline (vm, node, frame,
                               GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP6));
}

/* packet trace format function */
static u8 *
format_gbp_classify_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);

  s = format (s, "sclass:%d", t->sclass);

  return s;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (gbp_null_classify_node) = {
  .name = "gbp-null-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = 0,
  .n_next_nodes = 0,
};

VLIB_REGISTER_NODE (gbp_src_classify_node) = {
  .name = "gbp-src-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = 0,
  .n_next_nodes = 0,
};

VLIB_REGISTER_NODE (gbp_ip4_src_classify_node) = {
  .name = "ip4-gbp-src-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = 0,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "ip4-lookup"
  },
};

VLIB_REGISTER_NODE (gbp_ip6_src_classify_node) = {
  .name = "ip6-gbp-src-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = 0,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "ip6-lookup"
  },
};

VNET_FEATURE_INIT (gbp_ip4_src_classify_feat_node, static) =
{
  .arc_name = "ip4-unicast",
  .node_name = "ip4-gbp-src-classify",
  .runs_before = VNET_FEATURES ("nat44-out2in"),
};
VNET_FEATURE_INIT (gbp_ip6_src_classify_feat_node, static) =
{
  .arc_name = "ip6-unicast",
  .node_name = "ip6-gbp-src-classify",
  .runs_before = VNET_FEATURES ("nat66-out2in"),
};

/* *INDENT-ON* */
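/*
 * The ip4/ip6 src-classify features above are enabled per-interface at
 * runtime through the feature arc API. A minimal sketch (sw_if_index is a
 * placeholder for the interface being configured):
 *
 *   vnet_feature_enable_disable ("ip4-unicast", "ip4-gbp-src-classify",
 *                                sw_if_index, 1, NULL, 0);
 */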

typedef enum gbp_lpm_classify_next_t_
{
  GPB_LPM_CLASSIFY_DROP,
} gbp_lpm_classify_next_t;

/**
 * per-packet trace data
 */
typedef struct gbp_lpm_classify_trace_t_
{
  sclass_t sclass;
  index_t lbi;
  ip46_address_t src;
} gbp_lpm_classify_trace_t;

/* packet trace format function */
static u8 *
format_gbp_lpm_classify_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gbp_lpm_classify_trace_t *t = va_arg (*args, gbp_lpm_classify_trace_t *);

  s = format (s, "sclass:%d lb:%d src:%U",
              t->sclass, t->lbi, format_ip46_address, &t->src, IP46_TYPE_ANY);

  return s;
}

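/*
 * The LPM classifier runs in one of three modes:
 *  - EPG:    the packet comes from a known endpoint; LPM in that EP's
 *            route-domain, with an anti-spoofing check
 *  - RECIRC: the packet arrives on a GBP recirculation interface; LPM in
 *            the recirc interface's route-domain
 *  - ANON:   the packet arrives on an external interface with no
 *            programmed EPs; LPM in the external interface's route-domain
 */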
enum gbp_lpm_type
{
  GBP_LPM_EPG,
  GBP_LPM_RECIRC,
  GBP_LPM_ANON
};

/*
 * Determine the SRC EPG from an LPM
 */
always_inline uword
gbp_lpm_classify_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * frame,
                         const dpo_proto_t dproto,
                         const enum gbp_lpm_type type)
{
  gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
  u32 n_left_from, *from, *to_next;
  u32 next_index;

  next_index = 0;
  n_left_from = frame->n_vectors;
  from = vlib_frame_vector_args (frame);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, sw_if_index0, fib_index0, lbi0;
          const gbp_endpoint_t *ge0, *ge_lpm0;
          gbp_lpm_classify_next_t next0;
          const ethernet_header_t *eh0;
          const gbp_policy_dpo_t *gpd0;
          const ip4_address_t *ip4_0;
          const ip6_address_t *ip6_0;
          const gbp_recirc_t *gr0;
          vlib_buffer_t *b0;
          sclass_t sclass0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          ip4_0 = NULL;
          ip6_0 = NULL;
          next0 = GPB_LPM_CLASSIFY_DROP;

          lbi0 = ~0;
          eh0 = NULL;
          b0 = vlib_get_buffer (vm, bi0);

          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
          vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;

          if (DPO_PROTO_IP4 == dproto)
            ip4_0 =
              &((ip4_header_t *) vlib_buffer_get_current (b0))->src_address;
          else if (DPO_PROTO_IP6 == dproto)
            ip6_0 =
              &((ip6_header_t *) vlib_buffer_get_current (b0))->src_address;
          else if (DPO_PROTO_ETHERNET == dproto)
            {
              eh0 = vlib_buffer_get_current (b0);
              gbp_classify_get_ip_address (eh0, &ip4_0, &ip6_0,
                                           GBP_CLASSIFY_GET_IP_SRC);
            }

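          /*
           * select the FIB for the source-address LPM according to the
           * classification mode (recirc RD, external-interface RD, or the
           * sending EP's RD)
           */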
          if (GBP_LPM_RECIRC == type)
            {
              gr0 = gbp_recirc_get (sw_if_index0);
              fib_index0 = gr0->gr_fib_index[dproto];
              ge0 = NULL;

              vnet_feature_next (&next0, b0);
            }
          else
            {
              if (NULL == eh0)
                {
                  /* packet should be l2 */
                  sclass0 = SCLASS_INVALID;
                  goto trace;
                }

              if (GBP_LPM_ANON == type)
                {
                  /*
                   * anonymous LPM classification: only honour the LPM as no
                   * EPs were programmed
                   */
                  gbp_ext_itf_t *gei = gbp_ext_itf_get (sw_if_index0);
                  if (ip4_0)
                    fib_index0 = gei->gx_fib_index[DPO_PROTO_IP4];
                  else if (ip6_0)
                    fib_index0 = gei->gx_fib_index[DPO_PROTO_IP6];
                  else
                    {
                      /* not IP so no LPM classify possible */
                      sclass0 = SCLASS_INVALID;
                      next0 = GPB_LPM_CLASSIFY_DROP;
                      goto trace;
                    }
                  next0 = vnet_l2_feature_next
                    (b0, gscm->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM_ANON],
                     L2INPUT_FEAT_GBP_LPM_ANON_CLASSIFY);
                }
              else
                {
                  /*
                   * not an anonymous LPM classification: check it comes from
                   * an EP, and use the EP's RD info
                   */
                  ge0 = gbp_endpoint_find_mac (eh0->src_address,
                                               vnet_buffer (b0)->l2.bd_index);

                  if (NULL == ge0)
                    {
                      /* packet must have come from an EP's mac */
                      sclass0 = SCLASS_INVALID;
                      goto trace;
                    }

                  fib_index0 = ge0->ge_fwd.gef_fib_index;

                  if (~0 == fib_index0)
                    {
                      sclass0 = SCLASS_INVALID;
                      goto trace;
                    }

                  if (ip4_0)
                    {
                      ge_lpm0 = gbp_endpoint_find_ip4 (ip4_0, fib_index0);
                    }
                  else if (ip6_0)
                    {
                      ge_lpm0 = gbp_endpoint_find_ip6 (ip6_0, fib_index0);
                    }
                  else
                    {
                      ge_lpm0 = NULL;
                    }

                  next0 = vnet_l2_feature_next
                    (b0, gscm->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM],
                     L2INPUT_FEAT_GBP_LPM_CLASSIFY);

                  /*
                   * if we found the EP by IP lookup, it must be from the EP,
                   * not a network behind it
                   */
                  if (NULL != ge_lpm0)
                    {
                      if (PREDICT_FALSE (ge0 != ge_lpm0))
                        {
                          /* an EP spoofing another EP */
                          sclass0 = SCLASS_INVALID;
                          next0 = GPB_LPM_CLASSIFY_DROP;
                        }
                      else
                        {
                          sclass0 = ge0->ge_fwd.gef_sclass;
                        }
                      goto trace;
                    }
                }
            }

          gpd0 = gbp_classify_get_gpd (ip4_0, ip6_0, fib_index0);
          if (0 == gpd0)
            {
              /* could not classify => drop */
              sclass0 = SCLASS_INVALID;
              next0 = GPB_LPM_CLASSIFY_DROP;
              goto trace;
            }

          sclass0 = gpd0->gpd_sclass;

          /* packets from an external network should not be learned by the
           * receiver, so set the Do-not-learn bit here */
          vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_D;

        trace:
          vnet_buffer2 (b0)->gbp.sclass = sclass0;

          if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              gbp_lpm_classify_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sclass = sclass0;
              t->lbi = lbi0;
              if (ip4_0)
                t->src.ip4 = *ip4_0;
              if (ip6_0)
                t->src.ip6 = *ip6_0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

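/*
 * Entry points for the LPM classify nodes registered below: the ip4/ip6
 * variants run on the IP unicast feature arcs, the l2 variants run as L2
 * input features.
 */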
VLIB_NODE_FN (gbp_ip4_lpm_classify_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * frame)
{
  return (gbp_lpm_classify_inline
          (vm, node, frame, DPO_PROTO_IP4, GBP_LPM_RECIRC));
}

VLIB_NODE_FN (gbp_ip6_lpm_classify_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * frame)
{
  return (gbp_lpm_classify_inline
          (vm, node, frame, DPO_PROTO_IP6, GBP_LPM_RECIRC));
}

VLIB_NODE_FN (gbp_l2_lpm_classify_node) (vlib_main_t * vm,
                                         vlib_node_runtime_t * node,
                                         vlib_frame_t * frame)
{
  return (gbp_lpm_classify_inline
          (vm, node, frame, DPO_PROTO_ETHERNET, GBP_LPM_EPG));
}

VLIB_NODE_FN (gbp_l2_lpm_anon_classify_node) (vlib_main_t * vm,
                                              vlib_node_runtime_t * node,
                                              vlib_frame_t * frame)
{
  return (gbp_lpm_classify_inline
          (vm, node, frame, DPO_PROTO_ETHERNET, GBP_LPM_ANON));
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
  .name = "ip4-gbp-lpm-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_lpm_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = 0,
  .n_next_nodes = 1,
  .next_nodes = {
    [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
  },
};

VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
  .name = "ip6-gbp-lpm-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_lpm_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = 0,
  .n_next_nodes = 1,
  .next_nodes = {
    [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
  },
};

VLIB_REGISTER_NODE (gbp_l2_lpm_classify_node) = {
  .name = "l2-gbp-lpm-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_lpm_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = 0,
  .n_next_nodes = 1,
  .next_nodes = {
    [GPB_LPM_CLASSIFY_DROP] = "error-drop"
  },
};

VLIB_REGISTER_NODE (gbp_l2_lpm_anon_classify_node) = {
  .name = "l2-gbp-lpm-anon-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_lpm_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = 0,
  .n_next_nodes = 1,
  .next_nodes = {
    [GPB_LPM_CLASSIFY_DROP] = "error-drop"
  },
};

VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
{
  .arc_name = "ip4-unicast",
  .node_name = "ip4-gbp-lpm-classify",
  .runs_before = VNET_FEATURES ("nat44-out2in"),
};
VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
{
  .arc_name = "ip6-unicast",
  .node_name = "ip6-gbp-lpm-classify",
  .runs_before = VNET_FEATURES ("nat66-out2in"),
};

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */