FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
ip_in_out_acl.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vnet/ip/ip.h>
16 #include <vnet/classify/vnet_classify.h>
17 #include <vnet/classify/in_out_acl.h>
18 
19 typedef struct
20 {
21  u32 sw_if_index;
22  u32 next_index;
23  u32 table_index;
24  u32 offset;
25 } ip_in_out_acl_trace_t;
26 
27 /* packet trace format function */
28 static u8 *
29 format_ip_in_out_acl_trace (u8 * s, u32 is_output, va_list * args)
30 {
31  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
32  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
33  ip_in_out_acl_trace_t *t = va_arg (*args, ip_in_out_acl_trace_t *);
34 
35  s = format (s, "%s: sw_if_index %d, next_index %d, table %d, offset %d",
36  is_output ? "OUTACL" : "INACL",
37  t->sw_if_index, t->next_index, t->table_index, t->offset);
38  return s;
39 }
40 
41 static u8 *
42 format_ip_inacl_trace (u8 * s, va_list * args)
43 {
44  return format_ip_in_out_acl_trace (s, 0 /* is_output */ , args);
45 }
46 
47 static u8 *
48 format_ip_outacl_trace (u8 * s, va_list * args)
49 {
50  return format_ip_in_out_acl_trace (s, 1 /* is_output */ , args);
51 }
52 
53 extern vlib_node_registration_t ip4_inacl_node;
54 extern vlib_node_registration_t ip4_outacl_node;
55 extern vlib_node_registration_t ip6_inacl_node;
56 extern vlib_node_registration_t ip6_outacl_node;
57 
58 #define foreach_ip_inacl_error \
59 _(MISS, "input ACL misses") \
60 _(HIT, "input ACL hits") \
61 _(CHAIN_HIT, "input ACL hits after chain walk")
62 
63 #define foreach_ip_outacl_error \
64 _(MISS, "output ACL misses") \
65 _(HIT, "output ACL hits") \
66 _(CHAIN_HIT, "output ACL hits after chain walk")
67 
68 typedef enum
69 {
70 #define _(sym,str) IP_INACL_ERROR_##sym,
71  foreach_ip_inacl_error
72 #undef _
73  IP_INACL_N_ERROR,
74 } ip_inacl_error_t;
75 
76 static char *ip_inacl_error_strings[] = {
77 #define _(sym,string) string,
78  foreach_ip_inacl_error
79 #undef _
80 };
81 
82 typedef enum
83 {
84 #define _(sym,str) IP_OUTACL_ERROR_##sym,
85  foreach_ip_outacl_error
86 #undef _
87  IP_OUTACL_N_ERROR,
88 } ip_outacl_error_t;
89 
90 static char *ip_outacl_error_strings[] = {
91 #define _(sym,string) string,
92  foreach_ip_outacl_error
93 #undef _
94 };
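/* The foreach_ip_{in,out}acl_error lists above are expanded twice: once into
 * the IP_INACL_ERROR_* / IP_OUTACL_ERROR_* enums and once into the matching
 * counter-name strings, so the "misses", "hits" and "chain hits" node
 * counters stay in sync with the enum values by construction. */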
95 
96 static inline uword
97 ip_in_out_acl_inline (vlib_main_t * vm,
98  vlib_node_runtime_t * node, vlib_frame_t * frame,
99  int is_ip4, int is_output)
100 {
101  u32 n_left_from, *from, *to_next;
102  acl_next_index_t next_index;
103  in_out_acl_main_t *am = &in_out_acl_main;
104  vnet_classify_main_t *vcm = am->vnet_classify_main;
105  f64 now = vlib_time_now (vm);
106  u32 hits = 0;
107  u32 misses = 0;
108  u32 chain_hits = 0;
109  in_out_acl_table_id_t tid;
110  vlib_node_runtime_t *error_node;
111  u32 n_next_nodes;
112 
113  n_next_nodes = node->n_next_nodes;
114 
115  if (is_ip4)
116  {
117  tid = IN_OUT_ACL_TABLE_IP4;
118  error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
119  }
120  else
121  {
122  tid = IN_OUT_ACL_TABLE_IP6;
123  error_node = vlib_node_get_runtime (vm, ip6_input_node.index);
124  }
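 /* Packet errors (session deny, table miss) are reported against the
  * ip4-input / ip6-input error heap via error_node; this node's own
  * counters, bumped at the end of the frame, only track the
  * hit/miss/chain-hit totals. */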
125 
126  from = vlib_frame_vector_args (frame);
127  n_left_from = frame->n_vectors;
128 
129  /* First pass: compute hashes */
130 
131  while (n_left_from > 2)
132  {
133  vlib_buffer_t *b0, *b1;
134  u32 bi0, bi1;
135  u8 *h0, *h1;
136  u32 sw_if_index0, sw_if_index1;
137  u32 table_index0, table_index1;
138  vnet_classify_table_t *t0, *t1;
139 
140  /* prefetch next iteration */
141  {
142  vlib_buffer_t *p1, *p2;
143 
144  p1 = vlib_get_buffer (vm, from[1]);
145  p2 = vlib_get_buffer (vm, from[2]);
146 
147  vlib_prefetch_buffer_header (p1, STORE);
148  CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
149  vlib_prefetch_buffer_header (p2, STORE);
150  CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
151  }
152 
153  bi0 = from[0];
154  b0 = vlib_get_buffer (vm, bi0);
155 
156  bi1 = from[1];
157  b1 = vlib_get_buffer (vm, bi1);
158 
159  sw_if_index0 =
160  vnet_buffer (b0)->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
161  table_index0 =
162  am->classify_table_index_by_sw_if_index[is_output][tid][sw_if_index0];
163 
164  sw_if_index1 =
165  vnet_buffer (b1)->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
166  table_index1 =
167  am->classify_table_index_by_sw_if_index[is_output][tid][sw_if_index1];
168 
169  t0 = pool_elt_at_index (vcm->tables, table_index0);
170 
171  t1 = pool_elt_at_index (vcm->tables, table_index1);
172 
173  if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
174  h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset;
175  else
176  h0 = b0->data;
177 
178  if (is_output)
179  {
180  /* Save the rewrite length, since we are using the l2_classify struct */
181  vnet_buffer (b0)->l2_classify.pad.l2_len =
182  vnet_buffer (b0)->ip.save_rewrite_length;
183  /* advance the match pointer so the matching happens on IP header */
184  h0 += vnet_buffer (b0)->l2_classify.pad.l2_len;
185  }
186 
187  vnet_buffer (b0)->l2_classify.hash =
188  vnet_classify_hash_packet (t0, (u8 *) h0);
189 
190  vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash);
191 
192  if (t1->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
193  h1 = (void *) vlib_buffer_get_current (b1) + t1->current_data_offset;
194  else
195  h1 = b1->data;
196 
197  if (is_output)
198  {
199  /* Save the rewrite length, since we are using the l2_classify struct */
200  vnet_buffer (b1)->l2_classify.pad.l2_len =
201  vnet_buffer (b1)->ip.save_rewrite_length;
202  /* advance the match pointer so the matching happens on IP header */
203  h1 += vnet_buffer (b1)->l2_classify.pad.l2_len;
204  }
205 
206  vnet_buffer (b1)->l2_classify.hash =
207  vnet_classify_hash_packet (t1, (u8 *) h1);
208 
209  vnet_classify_prefetch_bucket (t1, vnet_buffer (b1)->l2_classify.hash);
210 
211  vnet_buffer (b0)->l2_classify.table_index = table_index0;
212 
213  vnet_buffer (b1)->l2_classify.table_index = table_index1;
214 
215  from += 2;
216  n_left_from -= 2;
217  }
218 
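 /* Finish the first pass on any remaining packets one at a time, using the
  * same hash computation and bucket prefetch as the dual-packet loop above. */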
219  while (n_left_from > 0)
220  {
221  vlib_buffer_t *b0;
222  u32 bi0;
223  u8 *h0;
224  u32 sw_if_index0;
225  u32 table_index0;
226  vnet_classify_table_t *t0;
227 
228  bi0 = from[0];
229  b0 = vlib_get_buffer (vm, bi0);
230 
231  sw_if_index0 =
232  vnet_buffer (b0)->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
233  table_index0 =
234  am->classify_table_index_by_sw_if_index[is_output][tid][sw_if_index0];
235 
236  t0 = pool_elt_at_index (vcm->tables, table_index0);
237 
238  if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
239  h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset;
240  else
241  h0 = b0->data;
242 
243  if (is_output)
244  {
245  /* Save the rewrite length, since we are using the l2_classify struct */
246  vnet_buffer (b0)->l2_classify.pad.l2_len =
247  vnet_buffer (b0)->ip.save_rewrite_length;
248  /* advance the match pointer so the matching happens on IP header */
249  h0 += vnet_buffer (b0)->l2_classify.pad.l2_len;
250  }
251 
252  vnet_buffer (b0)->l2_classify.hash =
253  vnet_classify_hash_packet (t0, (u8 *) h0);
254 
255  vnet_buffer (b0)->l2_classify.table_index = table_index0;
256  vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash);
257 
258  from++;
259  n_left_from--;
260  }
261 
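 /* Second pass: look up classifier sessions using the hashes computed above;
  * the bucket prefetches issued in the first pass should already have warmed
  * the relevant cache lines. */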
262  next_index = node->cached_next_index;
263  from = vlib_frame_vector_args (frame);
264  n_left_from = frame->n_vectors;
265 
266  while (n_left_from > 0)
267  {
268  u32 n_left_to_next;
269 
270  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
271 
272  /* Not enough load/store slots to dual loop... */
273  while (n_left_from > 0 && n_left_to_next > 0)
274  {
275  u32 bi0;
276  vlib_buffer_t *b0;
277  u32 next0 = ACL_NEXT_INDEX_DENY;
278  u32 table_index0;
279  vnet_classify_table_t *t0;
280  vnet_classify_entry_t *e0;
281  u64 hash0;
282  u8 *h0;
283  u8 error0;
284 
285  /* Stride 3 seems to work best */
286  if (PREDICT_TRUE (n_left_from > 3))
287  {
288  vlib_buffer_t *p1 = vlib_get_buffer (vm, from[3]);
289  vnet_classify_table_t *tp1;
290  u32 table_index1;
291  u64 phash1;
292 
293  table_index1 = vnet_buffer (p1)->l2_classify.table_index;
294 
295  if (PREDICT_TRUE (table_index1 != ~0))
296  {
297  tp1 = pool_elt_at_index (vcm->tables, table_index1);
298  phash1 = vnet_buffer (p1)->l2_classify.hash;
299  vnet_classify_prefetch_entry (tp1, phash1);
300  }
301  }
302 
303 
304  /* speculatively enqueue b0 to the current next frame */
305  bi0 = from[0];
306  to_next[0] = bi0;
307  from += 1;
308  to_next += 1;
309  n_left_from -= 1;
310  n_left_to_next -= 1;
311 
312  b0 = vlib_get_buffer (vm, bi0);
313  table_index0 = vnet_buffer (b0)->l2_classify.table_index;
314  e0 = 0;
315  t0 = 0;
316  vnet_get_config_data (am->vnet_config_main[is_output][tid],
317  &b0->current_config_index, &next0,
318  /* # bytes of config data */ 0);
319 
320  vnet_buffer (b0)->l2_classify.opaque_index = ~0;
321 
322  if (PREDICT_TRUE (table_index0 != ~0))
323  {
324  hash0 = vnet_buffer (b0)->l2_classify.hash;
325  t0 = pool_elt_at_index (vcm->tables, table_index0);
326 
327  if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
328  h0 =
329  (void *) vlib_buffer_get_current (b0) +
330  t0->current_data_offset;
331  else
332  h0 = b0->data;
333 
334  /* advance the match pointer so the matching happens on IP header */
335  if (is_output)
336  h0 += vnet_buffer (b0)->l2_classify.pad.l2_len;
337 
338  e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
339  if (e0)
340  {
341  vnet_buffer (b0)->l2_classify.opaque_index
342  = e0->opaque_index;
343  vlib_buffer_advance (b0, e0->advance);
344 
345  next0 = (e0->next_index < n_next_nodes) ?
346  e0->next_index : next0;
347 
348  hits++;
349 
350  if (is_ip4)
351  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
352  (is_output ? IP4_ERROR_OUTACL_SESSION_DENY :
353  IP4_ERROR_INACL_SESSION_DENY) : IP4_ERROR_NONE;
354  else
355  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
356  (is_output ? IP6_ERROR_OUTACL_SESSION_DENY :
357  IP6_ERROR_INACL_SESSION_DENY) : IP6_ERROR_NONE;
358  b0->error = error_node->errors[error0];
359 
360  if (!is_output)
361  {
362  if (e0->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
363  e0->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
364  vnet_buffer (b0)->sw_if_index[VLIB_TX] = e0->metadata;
365  else if (e0->action == CLASSIFY_ACTION_SET_METADATA)
366  vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
367  e0->metadata;
368  }
369  }
370  else
371  {
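 /* No session in the first table: walk the chain of linked tables via
  * next_table_index until a session matches or a table miss selects the
  * next node. */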
372  while (1)
373  {
374  if (PREDICT_TRUE (t0->next_table_index != ~0))
375  t0 = pool_elt_at_index (vcm->tables,
376  t0->next_table_index);
377  else
378  {
379  next0 = (t0->miss_next_index < n_next_nodes) ?
380  t0->miss_next_index : next0;
381 
382  misses++;
383 
384  if (is_ip4)
385  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
386  (is_output ? IP4_ERROR_OUTACL_TABLE_MISS :
387  IP4_ERROR_INACL_TABLE_MISS) : IP4_ERROR_NONE;
388  else
389  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
390  (is_output ? IP6_ERROR_OUTACL_TABLE_MISS :
391  IP6_ERROR_INACL_TABLE_MISS) : IP6_ERROR_NONE;
392  b0->error = error_node->errors[error0];
393  break;
394  }
395 
396  if (t0->current_data_flag ==
397  CLASSIFY_FLAG_USE_CURR_DATA)
398  h0 =
399  (void *) vlib_buffer_get_current (b0) +
400  t0->current_data_offset;
401  else
402  h0 = b0->data;
403 
404  /* advance the match pointer so the matching happens on IP header */
405  if (is_output)
406  h0 += vnet_buffer (b0)->l2_classify.pad.l2_len;
407 
408  hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
409  e0 = vnet_classify_find_entry
410  (t0, (u8 *) h0, hash0, now);
411  if (e0)
412  {
413  vnet_buffer (b0)->l2_classify.opaque_index
414  = e0->opaque_index;
415  vlib_buffer_advance (b0, e0->advance);
416  next0 = (e0->next_index < n_next_nodes) ?
417  e0->next_index : next0;
418  hits++;
419  chain_hits++;
420 
421  if (is_ip4)
422  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
423  (is_output ? IP4_ERROR_OUTACL_SESSION_DENY :
424  IP4_ERROR_INACL_SESSION_DENY) : IP4_ERROR_NONE;
425  else
426  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
427  (is_output ? IP6_ERROR_OUTACL_SESSION_DENY :
428  IP6_ERROR_INACL_SESSION_DENY) : IP6_ERROR_NONE;
429  b0->error = error_node->errors[error0];
430 
431  if (!is_output)
432  {
433  if (e0->action ==
434  CLASSIFY_ACTION_SET_IP4_FIB_INDEX
435  || e0->action ==
436  CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
437  vnet_buffer (b0)->sw_if_index[VLIB_TX] =
438  e0->metadata;
439  else if (e0->action ==
440  CLASSIFY_ACTION_SET_METADATA)
441  vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
442  e0->metadata;
443  }
444  break;
445  }
446  }
447  }
448  }
449 
450  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
451  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
452  {
453  ip_in_out_acl_trace_t *t =
454  vlib_add_trace (vm, node, b0, sizeof (*t));
455  t->sw_if_index =
456  vnet_buffer (b0)->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
457  t->next_index = next0;
458  t->table_index = t0 ? t0 - vcm->tables : ~0;
459  t->offset = (e0 && t0) ? vnet_classify_get_offset (t0, e0) : ~0;
460  }
461 
462  if ((next0 == ACL_NEXT_INDEX_DENY) && is_output)
463  {
464  /* on output, for the drop node to work properly, go back to ip header */
465  vlib_buffer_advance (b0, vnet_buffer (b0)->l2.l2_len);
466  }
467 
468  /* verify speculative enqueue, maybe switch current next frame */
469  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
470  to_next, n_left_to_next,
471  bi0, next0);
472  }
473 
474  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
475  }
476 
477  vlib_node_increment_counter (vm, node->node_index,
478  is_output ? IP_OUTACL_ERROR_MISS :
479  IP_INACL_ERROR_MISS, misses);
480  vlib_node_increment_counter (vm, node->node_index,
481  is_output ? IP_OUTACL_ERROR_HIT :
482  IP_INACL_ERROR_HIT, hits);
483  vlib_node_increment_counter (vm, node->node_index,
484  is_output ? IP_OUTACL_ERROR_CHAIN_HIT :
485  IP_INACL_ERROR_CHAIN_HIT, chain_hits);
486  return frame->n_vectors;
487 }
488 
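/* The four VLIB_NODE_FN entry points below instantiate the inline worker for
 * each combination of address family (is_ip4) and direction (is_output). */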
489 VLIB_NODE_FN (ip4_inacl_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
490  vlib_frame_t * frame)
491 {
492  return ip_in_out_acl_inline (vm, node, frame, 1 /* is_ip4 */ ,
493  0 /* is_output */ );
494 }
495 
496 VLIB_NODE_FN (ip4_outacl_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
497  vlib_frame_t * frame)
498 {
499  return ip_in_out_acl_inline (vm, node, frame, 1 /* is_ip4 */ ,
500  1 /* is_output */ );
501 }
502 
503 
504 /* *INDENT-OFF* */
505 VLIB_REGISTER_NODE (ip4_inacl_node) = {
506  .name = "ip4-inacl",
507  .vector_size = sizeof (u32),
508  .format_trace = format_ip_inacl_trace,
509  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
510  .error_strings = ip_inacl_error_strings,
511 
512  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
513  .next_nodes = {
514  [ACL_NEXT_INDEX_DENY] = "ip4-drop",
515  },
516 };
517 
518 VLIB_REGISTER_NODE (ip4_outacl_node) = {
519  .name = "ip4-outacl",
520  .vector_size = sizeof (u32),
521  .format_trace = format_ip_outacl_trace,
522  .n_errors = ARRAY_LEN(ip_outacl_error_strings),
523  .error_strings = ip_outacl_error_strings,
524 
525  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
526  .next_nodes = {
527  [ACL_NEXT_INDEX_DENY] = "ip4-drop",
528  },
529 };
530 /* *INDENT-ON* */
531 
532 VLIB_NODE_FN (ip6_inacl_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
533  vlib_frame_t * frame)
534 {
535  return ip_in_out_acl_inline (vm, node, frame, 0 /* is_ip4 */ ,
536  0 /* is_output */ );
537 }
538 
539 VLIB_NODE_FN (ip6_outacl_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
540  vlib_frame_t * frame)
541 {
542  return ip_in_out_acl_inline (vm, node, frame, 0 /* is_ip4 */ ,
543  1 /* is_output */ );
544 }
545 
546 /* *INDENT-OFF* */
547 VLIB_REGISTER_NODE (ip6_inacl_node) = {
548  .name = "ip6-inacl",
549  .vector_size = sizeof (u32),
550  .format_trace = format_ip_inacl_trace,
551  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
552  .error_strings = ip_inacl_error_strings,
553 
554  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
555  .next_nodes = {
556  [ACL_NEXT_INDEX_DENY] = "ip6-drop",
557  },
558 };
559 
560 VLIB_REGISTER_NODE (ip6_outacl_node) = {
561  .name = "ip6-outacl",
562  .vector_size = sizeof (u32),
563  .format_trace = format_ip_outacl_trace,
564  .n_errors = ARRAY_LEN(ip_outacl_error_strings),
565  .error_strings = ip_outacl_error_strings,
566 
567  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
568  .next_nodes = {
569  [ACL_NEXT_INDEX_DENY] = "ip6-drop",
570  },
571 };
572 /* *INDENT-ON* */
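/* These nodes are not enabled here directly: the companion in_out_acl code
 * (see in_out_acl.c / in_out_acl.h) installs the per-interface config and
 * selects the head classify table, typically via the "set interface input acl"
 * / "set interface output acl" CLI or the matching binary API (exact command
 * syntax may vary between releases). */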
573 
574 #ifndef CLIB_MARCH_VARIANT
575 static clib_error_t *
576 ip_in_out_acl_init (vlib_main_t * vm)
577 {
578  return 0;
579 }
580 
581 VLIB_INIT_FUNCTION (ip_in_out_acl_init);
582 #endif /* CLIB_MARCH_VARIANT */
583 
584 
585 /*
586  * fd.io coding-style-patch-verification: ON
587  *
588  * Local Variables:
589  * eval: (c-set-style "gnu")
590  * End:
591  */