FD.io VPP  v21.01.1
Vector Packet Processing
ip_in_out_acl.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vnet/ip/ip.h>
18 
19 typedef struct
20 {
25 }
27 
28 /* packet trace format function */
29 static u8 *
30 format_ip_in_out_acl_trace (u8 * s, u32 is_output, va_list * args)
31 {
32  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
33  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
34  ip_in_out_acl_trace_t *t = va_arg (*args, ip_in_out_acl_trace_t *);
35 
36  s = format (s, "%s: sw_if_index %d, next_index %d, table %d, offset %d",
37  is_output ? "OUTACL" : "INACL",
38  t->sw_if_index, t->next_index, t->table_index, t->offset);
39  return s;
40 }
41 
42 static u8 *
43 format_ip_inacl_trace (u8 * s, va_list * args)
44 {
45  return format_ip_in_out_acl_trace (s, 0 /* is_output */ , args);
46 }
47 
48 static u8 *
49 format_ip_outacl_trace (u8 * s, va_list * args)
50 {
51  return format_ip_in_out_acl_trace (s, 1 /* is_output */ , args);
52 }
53 
58 
/* X-macro listing the input-ACL node counters: expanded once with
 * _(sym,str) to build the IP_INACL_ERROR_* enum symbols and once to
 * build their description strings. */
#define foreach_ip_inacl_error \
_(MISS, "input ACL misses") \
_(HIT, "input ACL hits") \
_(CHAIN_HIT, "input ACL hits after chain walk")
63 
/* X-macro listing the output-ACL node counters: expanded once with
 * _(sym,str) to build the IP_OUTACL_ERROR_* enum symbols and once to
 * build their description strings. */
#define foreach_ip_outacl_error \
_(MISS, "output ACL misses") \
_(HIT, "output ACL hits") \
_(CHAIN_HIT, "output ACL hits after chain walk")
68 
69 typedef enum
70 {
71 #define _(sym,str) IP_INACL_ERROR_##sym,
73 #undef _
75 }
77 
78 static char *ip_inacl_error_strings[] = {
79 #define _(sym,string) string,
81 #undef _
82 };
83 
84 typedef enum
85 {
86 #define _(sym,str) IP_OUTACL_ERROR_##sym,
88 #undef _
90 }
92 
93 static char *ip_outacl_error_strings[] = {
94 #define _(sym,string) string,
96 #undef _
97 };
98 
102  u16 * next, u32 n_left, int is_ip4, int is_output,
103  int do_trace)
104 {
107  f64 now = vlib_time_now (vm);
108  u32 hits = 0;
109  u32 misses = 0;
110  u32 chain_hits = 0;
112  vlib_node_runtime_t *error_node;
113  u32 n_next_nodes;
114 
115  u8 *h[4];
116  u32 sw_if_index[4];
117  u32 table_index[4];
118  vnet_classify_table_t *t[4] = { 0, 0 };
119  u64 hash[4];
120 
121  n_next_nodes = node->n_next_nodes;
122 
123  if (is_ip4)
124  {
125  tid = IN_OUT_ACL_TABLE_IP4;
126  error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
127  }
128  else
129  {
130  tid = IN_OUT_ACL_TABLE_IP6;
131  error_node = vlib_node_get_runtime (vm, ip6_input_node.index);
132  }
133 
134  /* calculate hashes for b[0] & b[1] */
135  if (n_left >= 2)
136  {
137  sw_if_index[2] =
138  vnet_buffer (b[0])->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
139  sw_if_index[3] =
140  vnet_buffer (b[1])->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
141 
142  table_index[2] =
143  am->classify_table_index_by_sw_if_index[is_output][tid]
144  [sw_if_index[2]];
145  table_index[3] =
146  am->classify_table_index_by_sw_if_index[is_output][tid]
147  [sw_if_index[3]];
148 
149  t[2] = pool_elt_at_index (vcm->tables, table_index[2]);
150  t[3] = pool_elt_at_index (vcm->tables, table_index[3]);
151 
152  if (t[2]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
153  h[2] =
154  (void *) vlib_buffer_get_current (b[0]) + t[2]->current_data_offset;
155  else
156  h[2] = b[0]->data;
157 
158  if (t[3]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
159  h[3] =
160  (void *) vlib_buffer_get_current (b[1]) + t[3]->current_data_offset;
161  else
162  h[3] = b[1]->data;
163 
164  if (is_output)
165  {
166  /* Save the rewrite length, since we are using the l2_classify struct */
167  vnet_buffer (b[0])->l2_classify.pad.l2_len =
168  vnet_buffer (b[0])->ip.save_rewrite_length;
169  /* advance the match pointer so the matching happens on IP header */
170  h[2] += vnet_buffer (b[0])->l2_classify.pad.l2_len;
171 
172  /* Save the rewrite length, since we are using the l2_classify struct */
173  vnet_buffer (b[1])->l2_classify.pad.l2_len =
174  vnet_buffer (b[1])->ip.save_rewrite_length;
175  /* advance the match pointer so the matching happens on IP header */
176  h[3] += vnet_buffer (b[1])->l2_classify.pad.l2_len;
177  }
178 
179  hash[2] = vnet_classify_hash_packet_inline (t[2], (u8 *) h[2]);
180  hash[3] = vnet_classify_hash_packet_inline (t[3], (u8 *) h[3]);
181 
182  vnet_buffer (b[0])->l2_classify.hash = hash[2];
183  vnet_buffer (b[1])->l2_classify.hash = hash[3];
184 
185  vnet_buffer (b[0])->l2_classify.table_index = table_index[2];
186  vnet_buffer (b[1])->l2_classify.table_index = table_index[3];
187 
188  vnet_buffer (b[0])->l2_classify.opaque_index = ~0;
189  vnet_buffer (b[1])->l2_classify.opaque_index = ~0;
190 
192  vnet_buffer (b[0])->l2_classify.hash);
194  vnet_buffer (b[1])->l2_classify.hash);
195  }
196 
197  while (n_left >= 2)
198  {
199  vnet_classify_entry_t *e[2] = { 0, 0 };
201  u8 error[2];
202 
203  h[0] = h[2];
204  h[1] = h[3];
205  t[0] = t[2];
206  t[1] = t[3];
207 
208  sw_if_index[0] = sw_if_index[2];
209  sw_if_index[1] = sw_if_index[3];
210 
211  table_index[0] = table_index[2];
212  table_index[1] = table_index[3];
213 
214  hash[0] = hash[2];
215  hash[1] = hash[3];
216 
217  /* prefetch next iteration */
218  if (n_left >= 6)
219  {
220  vlib_prefetch_buffer_header (b[4], LOAD);
221  vlib_prefetch_buffer_header (b[5], LOAD);
222 
225  }
226 
227  /* calculate hashes for b[2] & b[3] */
228  if (n_left >= 4)
229  {
230  sw_if_index[2] =
231  vnet_buffer (b[2])->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
232  sw_if_index[3] =
233  vnet_buffer (b[3])->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
234 
235  table_index[2] =
236  am->classify_table_index_by_sw_if_index[is_output][tid]
237  [sw_if_index[2]];
238  table_index[3] =
239  am->classify_table_index_by_sw_if_index[is_output][tid]
240  [sw_if_index[3]];
241 
242  t[2] = pool_elt_at_index (vcm->tables, table_index[2]);
243  t[3] = pool_elt_at_index (vcm->tables, table_index[3]);
244 
245  if (t[2]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
246  h[2] =
247  (void *) vlib_buffer_get_current (b[2]) +
248  t[2]->current_data_offset;
249  else
250  h[2] = b[2]->data;
251 
252  if (t[3]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
253  h[3] =
254  (void *) vlib_buffer_get_current (b[3]) +
255  t[3]->current_data_offset;
256  else
257  h[3] = b[3]->data;
258 
259  if (is_output)
260  {
261  /* Save the rewrite length, since we are using the l2_classify struct */
262  vnet_buffer (b[2])->l2_classify.pad.l2_len =
263  vnet_buffer (b[2])->ip.save_rewrite_length;
264  /* advance the match pointer so the matching happens on IP header */
265  h[2] += vnet_buffer (b[2])->l2_classify.pad.l2_len;
266 
267  /* Save the rewrite length, since we are using the l2_classify struct */
268  vnet_buffer (b[3])->l2_classify.pad.l2_len =
269  vnet_buffer (b[3])->ip.save_rewrite_length;
270  /* advance the match pointer so the matching happens on IP header */
271  h[3] += vnet_buffer (b[3])->l2_classify.pad.l2_len;
272  }
273 
274  hash[2] = vnet_classify_hash_packet_inline (t[2], (u8 *) h[2]);
275  hash[3] = vnet_classify_hash_packet_inline (t[3], (u8 *) h[3]);
276 
277  vnet_buffer (b[2])->l2_classify.hash = hash[2];
278  vnet_buffer (b[3])->l2_classify.hash = hash[3];
279 
280  vnet_buffer (b[2])->l2_classify.table_index = table_index[2];
281  vnet_buffer (b[3])->l2_classify.table_index = table_index[3];
282 
283  vnet_buffer (b[2])->l2_classify.opaque_index = ~0;
284  vnet_buffer (b[3])->l2_classify.opaque_index = ~0;
285 
287  vnet_buffer (b[2])->
288  l2_classify.hash);
290  vnet_buffer (b[3])->
291  l2_classify.hash);
292  }
293 
294  /* find entry for b[0] & b[1] */
295  vnet_get_config_data (am->vnet_config_main[is_output][tid],
296  &b[0]->current_config_index, &_next[0],
297  /* # bytes of config data */ 0);
298  vnet_get_config_data (am->vnet_config_main[is_output][tid],
299  &b[1]->current_config_index, &_next[1],
300  /* # bytes of config data */ 0);
301 
302  if (PREDICT_TRUE (table_index[0] != ~0))
303  {
304  e[0] =
305  vnet_classify_find_entry_inline (t[0], (u8 *) h[0], hash[0], now);
306  if (e[0])
307  {
308  vnet_buffer (b[0])->l2_classify.opaque_index
309  = e[0]->opaque_index;
310  vlib_buffer_advance (b[0], e[0]->advance);
311 
312  _next[0] = (e[0]->next_index < n_next_nodes) ?
313  e[0]->next_index : _next[0];
314 
315  hits++;
316 
317  if (is_ip4)
318  error[0] = (_next[0] == ACL_NEXT_INDEX_DENY) ?
319  (is_output ? IP4_ERROR_OUTACL_SESSION_DENY :
320  IP4_ERROR_INACL_SESSION_DENY) : IP4_ERROR_NONE;
321  else
322  error[0] = (_next[0] == ACL_NEXT_INDEX_DENY) ?
323  (is_output ? IP6_ERROR_OUTACL_SESSION_DENY :
324  IP6_ERROR_INACL_SESSION_DENY) : IP6_ERROR_NONE;
325  b[0]->error = error_node->errors[error[0]];
326 
327  if (!is_output)
328  {
331  vnet_buffer (b[0])->sw_if_index[VLIB_TX] = e[0]->metadata;
332  else if (e[0]->action == CLASSIFY_ACTION_SET_METADATA)
333  vnet_buffer (b[0])->ip.adj_index[VLIB_TX] =
334  e[0]->metadata;
335  }
336  }
337  else
338  {
339  while (1)
340  {
341  if (PREDICT_TRUE (t[0]->next_table_index != ~0))
342  t[0] = pool_elt_at_index (vcm->tables,
343  t[0]->next_table_index);
344  else
345  {
346  _next[0] = (t[0]->miss_next_index < n_next_nodes) ?
347  t[0]->miss_next_index : _next[0];
348 
349  misses++;
350 
351  if (is_ip4)
352  error[0] = (_next[0] == ACL_NEXT_INDEX_DENY) ?
353  (is_output ? IP4_ERROR_OUTACL_TABLE_MISS :
354  IP4_ERROR_INACL_TABLE_MISS) : IP4_ERROR_NONE;
355  else
356  error[0] = (_next[0] == ACL_NEXT_INDEX_DENY) ?
357  (is_output ? IP6_ERROR_OUTACL_TABLE_MISS :
358  IP6_ERROR_INACL_TABLE_MISS) : IP6_ERROR_NONE;
359  b[0]->error = error_node->errors[error[0]];
360  break;
361  }
362 
363  if (t[0]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
364  h[0] =
365  (void *) vlib_buffer_get_current (b[0]) +
366  t[0]->current_data_offset;
367  else
368  h[0] = b[0]->data;
369 
370  /* advance the match pointer so the matching happens on IP header */
371  if (is_output)
372  h[0] += vnet_buffer (b[0])->l2_classify.pad.l2_len;
373 
374  hash[0] =
375  vnet_classify_hash_packet_inline (t[0], (u8 *) h[0]);
376  e[0] =
377  vnet_classify_find_entry_inline (t[0], (u8 *) h[0],
378  hash[0], now);
379  if (e[0])
380  {
381  vnet_buffer (b[0])->l2_classify.opaque_index
382  = e[0]->opaque_index;
383  vlib_buffer_advance (b[0], e[0]->advance);
384  _next[0] = (e[0]->next_index < n_next_nodes) ?
385  e[0]->next_index : _next[0];
386  hits++;
387  chain_hits++;
388 
389  if (is_ip4)
390  error[0] = (_next[0] == ACL_NEXT_INDEX_DENY) ?
391  (is_output ? IP4_ERROR_OUTACL_SESSION_DENY :
392  IP4_ERROR_INACL_SESSION_DENY) : IP4_ERROR_NONE;
393  else
394  error[0] = (_next[0] == ACL_NEXT_INDEX_DENY) ?
395  (is_output ? IP6_ERROR_OUTACL_SESSION_DENY :
396  IP6_ERROR_INACL_SESSION_DENY) : IP6_ERROR_NONE;
397  b[0]->error = error_node->errors[error[0]];
398 
399  if (!is_output)
400  {
401  if (e[0]->action ==
403  || e[0]->action ==
405  vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
406  e[0]->metadata;
407  else if (e[0]->action ==
409  vnet_buffer (b[0])->ip.adj_index[VLIB_TX] =
410  e[0]->metadata;
411  }
412  break;
413  }
414  }
415  }
416  }
417 
418  if (PREDICT_TRUE (table_index[1] != ~0))
419  {
420  e[1] =
421  vnet_classify_find_entry_inline (t[1], (u8 *) h[1], hash[1], now);
422  if (e[1])
423  {
424  vnet_buffer (b[1])->l2_classify.opaque_index
425  = e[1]->opaque_index;
426  vlib_buffer_advance (b[1], e[1]->advance);
427 
428  _next[1] = (e[1]->next_index < n_next_nodes) ?
429  e[1]->next_index : _next[1];
430 
431  hits++;
432 
433  if (is_ip4)
434  error[1] = (_next[1] == ACL_NEXT_INDEX_DENY) ?
435  (is_output ? IP4_ERROR_OUTACL_SESSION_DENY :
436  IP4_ERROR_INACL_SESSION_DENY) : IP4_ERROR_NONE;
437  else
438  error[1] = (_next[1] == ACL_NEXT_INDEX_DENY) ?
439  (is_output ? IP6_ERROR_OUTACL_SESSION_DENY :
440  IP6_ERROR_INACL_SESSION_DENY) : IP6_ERROR_NONE;
441  b[1]->error = error_node->errors[error[1]];
442 
443  if (!is_output)
444  {
447  vnet_buffer (b[1])->sw_if_index[VLIB_TX] = e[1]->metadata;
448  else if (e[1]->action == CLASSIFY_ACTION_SET_METADATA)
449  vnet_buffer (b[1])->ip.adj_index[VLIB_TX] =
450  e[1]->metadata;
451  }
452  }
453  else
454  {
455  while (1)
456  {
457  if (PREDICT_TRUE (t[1]->next_table_index != ~0))
458  t[1] = pool_elt_at_index (vcm->tables,
459  t[1]->next_table_index);
460  else
461  {
462  _next[1] = (t[1]->miss_next_index < n_next_nodes) ?
463  t[1]->miss_next_index : _next[1];
464 
465  misses++;
466 
467  if (is_ip4)
468  error[1] = (_next[1] == ACL_NEXT_INDEX_DENY) ?
469  (is_output ? IP4_ERROR_OUTACL_TABLE_MISS :
470  IP4_ERROR_INACL_TABLE_MISS) : IP4_ERROR_NONE;
471  else
472  error[1] = (_next[1] == ACL_NEXT_INDEX_DENY) ?
473  (is_output ? IP6_ERROR_OUTACL_TABLE_MISS :
474  IP6_ERROR_INACL_TABLE_MISS) : IP6_ERROR_NONE;
475  b[1]->error = error_node->errors[error[1]];
476  break;
477  }
478 
479  if (t[1]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
480  h[1] =
481  (void *) vlib_buffer_get_current (b[1]) +
482  t[1]->current_data_offset;
483  else
484  h[1] = b[1]->data;
485 
486  /* advance the match pointer so the matching happens on IP header */
487  if (is_output)
488  h[1] += vnet_buffer (b[1])->l2_classify.pad.l2_len;
489 
490  hash[1] =
491  vnet_classify_hash_packet_inline (t[1], (u8 *) h[1]);
492  e[1] =
493  vnet_classify_find_entry_inline (t[1], (u8 *) h[1],
494  hash[1], now);
495  if (e[1])
496  {
497  vnet_buffer (b[1])->l2_classify.opaque_index
498  = e[1]->opaque_index;
499  vlib_buffer_advance (b[1], e[1]->advance);
500  _next[1] = (e[1]->next_index < n_next_nodes) ?
501  e[1]->next_index : _next[1];
502  hits++;
503  chain_hits++;
504 
505  if (is_ip4)
506  error[1] = (_next[1] == ACL_NEXT_INDEX_DENY) ?
507  (is_output ? IP4_ERROR_OUTACL_SESSION_DENY :
508  IP4_ERROR_INACL_SESSION_DENY) : IP4_ERROR_NONE;
509  else
510  error[1] = (_next[1] == ACL_NEXT_INDEX_DENY) ?
511  (is_output ? IP6_ERROR_OUTACL_SESSION_DENY :
512  IP6_ERROR_INACL_SESSION_DENY) : IP6_ERROR_NONE;
513  b[1]->error = error_node->errors[error[1]];
514 
515  if (!is_output)
516  {
517  if (e[1]->action ==
519  || e[1]->action ==
521  vnet_buffer (b[1])->sw_if_index[VLIB_TX] =
522  e[1]->metadata;
523  else if (e[1]->action ==
525  vnet_buffer (b[1])->ip.adj_index[VLIB_TX] =
526  e[1]->metadata;
527  }
528  break;
529  }
530  }
531  }
532  }
533 
534  if (do_trace && b[0]->flags & VLIB_BUFFER_IS_TRACED)
535  {
537  vlib_add_trace (vm, node, b[0], sizeof (*_t));
538  _t->sw_if_index =
539  vnet_buffer (b[0])->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
540  _t->next_index = _next[0];
541  _t->table_index = t[0] ? t[0] - vcm->tables : ~0;
542  _t->offset = (e[0]
543  && t[0]) ? vnet_classify_get_offset (t[0], e[0]) : ~0;
544  }
545 
546  if (do_trace && b[1]->flags & VLIB_BUFFER_IS_TRACED)
547  {
549  vlib_add_trace (vm, node, b[1], sizeof (*_t));
550  _t->sw_if_index =
551  vnet_buffer (b[1])->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
552  _t->next_index = _next[1];
553  _t->table_index = t[1] ? t[1] - vcm->tables : ~0;
554  _t->offset = (e[1]
555  && t[1]) ? vnet_classify_get_offset (t[1], e[1]) : ~0;
556  }
557 
558  if ((_next[0] == ACL_NEXT_INDEX_DENY) && is_output)
559  {
560  /* on output, for the drop node to work properly, go back to ip header */
561  vlib_buffer_advance (b[0], vnet_buffer (b[0])->l2.l2_len);
562  }
563 
564  if ((_next[1] == ACL_NEXT_INDEX_DENY) && is_output)
565  {
566  /* on output, for the drop node to work properly, go back to ip header */
567  vlib_buffer_advance (b[1], vnet_buffer (b[1])->l2.l2_len);
568  }
569 
570  next[0] = _next[0];
571  next[1] = _next[1];
572 
573  /* _next */
574  next += 2;
575  b += 2;
576  n_left -= 2;
577  }
578 
579  while (n_left > 0)
580  {
581  u8 *h0;
582  u32 sw_if_index0;
583  u32 table_index0;
584  vnet_classify_table_t *t0 = 0;
585  vnet_classify_entry_t *e0 = 0;
586  u32 next0 = ACL_NEXT_INDEX_DENY;
587  u64 hash0;
588  u8 error0;
589 
590  sw_if_index0 =
591  vnet_buffer (b[0])->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
592  table_index0 =
593  am->classify_table_index_by_sw_if_index[is_output][tid][sw_if_index0];
594 
595  t0 = pool_elt_at_index (vcm->tables, table_index0);
596 
598  h0 =
599  (void *) vlib_buffer_get_current (b[0]) + t0->current_data_offset;
600  else
601  h0 = b[0]->data;
602 
603  if (is_output)
604  {
605  /* Save the rewrite length, since we are using the l2_classify struct */
606  vnet_buffer (b[0])->l2_classify.pad.l2_len =
607  vnet_buffer (b[0])->ip.save_rewrite_length;
608  /* advance the match pointer so the matching happens on IP header */
609  h0 += vnet_buffer (b[0])->l2_classify.pad.l2_len;
610  }
611 
612  vnet_buffer (b[0])->l2_classify.hash =
613  vnet_classify_hash_packet (t0, (u8 *) h0);
614 
615  vnet_buffer (b[0])->l2_classify.table_index = table_index0;
616  vnet_buffer (b[0])->l2_classify.opaque_index = ~0;
617 
618  vnet_get_config_data (am->vnet_config_main[is_output][tid],
619  &b[0]->current_config_index, &next0,
620  /* # bytes of config data */ 0);
621 
622  if (PREDICT_TRUE (table_index0 != ~0))
623  {
624  hash0 = vnet_buffer (b[0])->l2_classify.hash;
625  t0 = pool_elt_at_index (vcm->tables, table_index0);
626 
628  h0 =
629  (void *) vlib_buffer_get_current (b[0]) +
631  else
632  h0 = b[0]->data;
633 
634  /* advance the match pointer so the matching happens on IP header */
635  if (is_output)
636  h0 += vnet_buffer (b[0])->l2_classify.pad.l2_len;
637 
638  e0 = vnet_classify_find_entry_inline (t0, (u8 *) h0, hash0, now);
639  if (e0)
640  {
641  vnet_buffer (b[0])->l2_classify.opaque_index = e0->opaque_index;
642  vlib_buffer_advance (b[0], e0->advance);
643 
644  next0 = (e0->next_index < n_next_nodes) ?
645  e0->next_index : next0;
646 
647  hits++;
648 
649  if (is_ip4)
650  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
651  (is_output ? IP4_ERROR_OUTACL_SESSION_DENY :
652  IP4_ERROR_INACL_SESSION_DENY) : IP4_ERROR_NONE;
653  else
654  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
655  (is_output ? IP6_ERROR_OUTACL_SESSION_DENY :
656  IP6_ERROR_INACL_SESSION_DENY) : IP6_ERROR_NONE;
657  b[0]->error = error_node->errors[error0];
658 
659  if (!is_output)
660  {
661  if (e0->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
662  e0->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
663  vnet_buffer (b[0])->sw_if_index[VLIB_TX] = e0->metadata;
664  else if (e0->action == CLASSIFY_ACTION_SET_METADATA)
665  vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = e0->metadata;
666  }
667  }
668  else
669  {
670  while (1)
671  {
672  if (PREDICT_TRUE (t0->next_table_index != ~0))
673  t0 =
674  pool_elt_at_index (vcm->tables, t0->next_table_index);
675  else
676  {
677  next0 = (t0->miss_next_index < n_next_nodes) ?
678  t0->miss_next_index : next0;
679 
680  misses++;
681 
682  if (is_ip4)
683  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
684  (is_output ? IP4_ERROR_OUTACL_TABLE_MISS :
685  IP4_ERROR_INACL_TABLE_MISS) : IP4_ERROR_NONE;
686  else
687  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
688  (is_output ? IP6_ERROR_OUTACL_TABLE_MISS :
689  IP6_ERROR_INACL_TABLE_MISS) : IP6_ERROR_NONE;
690  b[0]->error = error_node->errors[error0];
691  break;
692  }
693 
695  h0 =
696  (void *) vlib_buffer_get_current (b[0]) +
698  else
699  h0 = b[0]->data;
700 
701  /* advance the match pointer so the matching happens on IP header */
702  if (is_output)
703  h0 += vnet_buffer (b[0])->l2_classify.pad.l2_len;
704 
705  hash0 = vnet_classify_hash_packet_inline (t0, (u8 *) h0);
707  (t0, (u8 *) h0, hash0, now);
708  if (e0)
709  {
710  vnet_buffer (b[0])->l2_classify.opaque_index
711  = e0->opaque_index;
712  vlib_buffer_advance (b[0], e0->advance);
713  next0 = (e0->next_index < n_next_nodes) ?
714  e0->next_index : next0;
715  hits++;
716 
717  if (is_ip4)
718  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
719  (is_output ? IP4_ERROR_OUTACL_SESSION_DENY :
720  IP4_ERROR_INACL_SESSION_DENY) : IP4_ERROR_NONE;
721  else
722  error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
723  (is_output ? IP6_ERROR_OUTACL_SESSION_DENY :
724  IP6_ERROR_INACL_SESSION_DENY) : IP6_ERROR_NONE;
725  b[0]->error = error_node->errors[error0];
726 
727  if (!is_output)
728  {
729  if (e0->action ==
731  || e0->action ==
733  vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
734  e0->metadata;
735  else if (e0->action == CLASSIFY_ACTION_SET_METADATA)
736  vnet_buffer (b[0])->ip.adj_index[VLIB_TX] =
737  e0->metadata;
738  }
739  break;
740  }
741  }
742  }
743  }
744 
745  if (do_trace && b[0]->flags & VLIB_BUFFER_IS_TRACED)
746  {
748  vlib_add_trace (vm, node, b[0], sizeof (*t));
749  t->sw_if_index =
750  vnet_buffer (b[0])->sw_if_index[is_output ? VLIB_TX : VLIB_RX];
751  t->next_index = next0;
752  t->table_index = t0 ? t0 - vcm->tables : ~0;
753  t->offset = (e0 && t0) ? vnet_classify_get_offset (t0, e0) : ~0;
754  }
755 
756  if ((next0 == ACL_NEXT_INDEX_DENY) && is_output)
757  {
758  /* on output, for the drop node to work properly, go back to ip header */
759  vlib_buffer_advance (b[0], vnet_buffer (b[0])->l2.l2_len);
760  }
761 
762  next[0] = next0;
763 
764  /* next */
765  next++;
766  b++;
767  n_left--;
768  }
769 
771  is_output ? IP_OUTACL_ERROR_MISS :
772  IP_INACL_ERROR_MISS, misses);
774  is_output ? IP_OUTACL_ERROR_HIT :
775  IP_INACL_ERROR_HIT, hits);
777  is_output ? IP_OUTACL_ERROR_CHAIN_HIT :
778  IP_INACL_ERROR_CHAIN_HIT, chain_hits);
779 }
780 
783 {
784 
785  u32 *from;
787  u16 nexts[VLIB_FRAME_SIZE];
788 
789  from = vlib_frame_vector_args (frame);
790 
791  vlib_get_buffers (vm, from, bufs, frame->n_vectors);
792 
793  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
794  ip_in_out_acl_inline (vm, node, bufs, nexts, frame->n_vectors,
795  1 /* is_ip4 */ ,
796  0 /* is_output */ , 1 /* is_trace */ );
797  else
798  ip_in_out_acl_inline (vm, node, bufs, nexts, frame->n_vectors,
799  1 /* is_ip4 */ ,
800  0 /* is_output */ , 0 /* is_trace */ );
801 
802  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
803 
804  return frame->n_vectors;
805 }
806 
809 {
810  u32 *from;
812  u16 nexts[VLIB_FRAME_SIZE];
813 
814  from = vlib_frame_vector_args (frame);
815 
816  vlib_get_buffers (vm, from, bufs, frame->n_vectors);
817 
818  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
819  ip_in_out_acl_inline (vm, node, bufs, nexts, frame->n_vectors,
820  1 /* is_ip4 */ ,
821  1 /* is_output */ , 1 /* is_trace */ );
822  else
823  ip_in_out_acl_inline (vm, node, bufs, nexts, frame->n_vectors,
824  1 /* is_ip4 */ ,
825  1 /* is_output */ , 0 /* is_trace */ );
826 
827  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
828 
829  return frame->n_vectors;
830 }
831 
832 /* *INDENT-OFF* */
834  .name = "ip4-inacl",
835  .vector_size = sizeof (u32),
836  .format_trace = format_ip_inacl_trace,
837  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
838  .error_strings = ip_inacl_error_strings,
839 
840  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
841  .next_nodes = {
842  [ACL_NEXT_INDEX_DENY] = "ip4-drop",
843  },
844 };
845 
847  .name = "ip4-outacl",
848  .vector_size = sizeof (u32),
849  .format_trace = format_ip_outacl_trace,
850  .n_errors = ARRAY_LEN(ip_outacl_error_strings),
851  .error_strings = ip_outacl_error_strings,
852 
853  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
854  .next_nodes = {
855  [ACL_NEXT_INDEX_DENY] = "ip4-drop",
856  },
857 };
858 /* *INDENT-ON* */
859 
862 {
863  u32 *from;
865  u16 nexts[VLIB_FRAME_SIZE];
866 
867  from = vlib_frame_vector_args (frame);
868 
869  vlib_get_buffers (vm, from, bufs, frame->n_vectors);
870 
871  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
872  ip_in_out_acl_inline (vm, node, bufs, nexts, frame->n_vectors,
873  0 /* is_ip4 */ ,
874  0 /* is_output */ , 1 /* is_trace */ );
875  else
876  ip_in_out_acl_inline (vm, node, bufs, nexts, frame->n_vectors,
877  0 /* is_ip4 */ ,
878  0 /* is_output */ , 0 /* is_trace */ );
879 
880  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
881 
882  return frame->n_vectors;
883 }
884 
887 {
888  u32 *from;
890  u16 nexts[VLIB_FRAME_SIZE];
891 
892  from = vlib_frame_vector_args (frame);
893 
894  vlib_get_buffers (vm, from, bufs, frame->n_vectors);
895 
896  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
897  ip_in_out_acl_inline (vm, node, bufs, nexts, frame->n_vectors,
898  0 /* is_ip4 */ ,
899  1 /* is_output */ , 1 /* is_trace */ );
900  else
901  ip_in_out_acl_inline (vm, node, bufs, nexts, frame->n_vectors,
902  0 /* is_ip4 */ ,
903  1 /* is_output */ , 0 /* is_trace */ );
904 
905  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
906 
907  return frame->n_vectors;
908 }
909 
910 /* *INDENT-OFF* */
912  .name = "ip6-inacl",
913  .vector_size = sizeof (u32),
914  .format_trace = format_ip_inacl_trace,
915  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
916  .error_strings = ip_inacl_error_strings,
917 
918  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
919  .next_nodes = {
920  [ACL_NEXT_INDEX_DENY] = "ip6-drop",
921  },
922 };
923 
925  .name = "ip6-outacl",
926  .vector_size = sizeof (u32),
927  .format_trace = format_ip_outacl_trace,
928  .n_errors = ARRAY_LEN(ip_outacl_error_strings),
929  .error_strings = ip_outacl_error_strings,
930 
931  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
932  .next_nodes = {
933  [ACL_NEXT_INDEX_DENY] = "ip6-drop",
934  },
935 };
936 /* *INDENT-ON* */
937 
938 #ifndef CLIB_MARCH_VARIANT
939 static clib_error_t *
941 {
942  return 0;
943 }
944 
946 #endif /* CLIB_MARCH_VARIANT */
947 
948 
949 /*
950  * fd.io coding-style-patch-verification: ON
951  *
952  * Local Variables:
953  * eval: (c-set-style "gnu")
954  * End:
955  */
u64 vnet_classify_hash_packet(vnet_classify_table_t *t, u8 *h)
vlib_node_registration_t ip4_inacl_node
(constructor) VLIB_REGISTER_NODE (ip4_inacl_node)
#define CLIB_UNUSED(x)
Definition: clib.h:87
static u8 * format_ip_in_out_acl_trace(u8 *s, u32 is_output, va_list *args)
Definition: ip_in_out_acl.c:30
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105
#define PREDICT_TRUE(x)
Definition: clib.h:122
static vnet_classify_entry_t * vnet_classify_find_entry_inline(vnet_classify_table_t *t, u8 *h, u64 hash, f64 now)
unsigned long u64
Definition: types.h:89
u32 * classify_table_index_by_sw_if_index[IN_OUT_ACL_N_TABLE_GROUPS][IN_OUT_ACL_N_TABLES]
Definition: in_out_acl.h:50
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:334
vlib_main_t * vm
Definition: in2out_ed.c:1580
#define VLIB_NODE_FN(node)
Definition: node.h:203
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:470
unsigned char u8
Definition: types.h:56
u8 data[128]
Definition: ipsec_types.api:90
ip_inacl_error_t
Definition: ip_in_out_acl.c:69
double f64
Definition: types.h:142
#define CLASSIFY_FLAG_USE_CURR_DATA
Definition: vnet_classify.h:37
#define static_always_inline
Definition: clib.h:109
static_always_inline void ip_in_out_acl_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t **b, u16 *next, u32 n_left, int is_ip4, int is_output, int do_trace)
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:173
description fragment has unexpected format
Definition: map.api:433
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:207
#define foreach_ip_outacl_error
Definition: ip_in_out_acl.c:64
const cJSON *const b
Definition: cJSON.h:255
vnet_config_main_t * vnet_config_main[IN_OUT_ACL_N_TABLE_GROUPS][IN_OUT_ACL_N_TABLES]
Definition: in_out_acl.h:57
unsigned int u32
Definition: types.h:88
static void vnet_classify_prefetch_bucket(vnet_classify_table_t *t, u64 hash)
#define VLIB_FRAME_SIZE
Definition: node.h:378
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
static u64 vnet_classify_hash_packet_inline(vnet_classify_table_t *t, u8 *h)
Definition: cJSON.c:84
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:546
vlib_node_registration_t ip4_input_node
Global ip4 input node.
Definition: ip4_input.c:385
vlib_node_registration_t ip6_input_node
(constructor) VLIB_REGISTER_NODE (ip6_input_node)
Definition: ip6_input.c:230
unsigned short u16
Definition: types.h:57
vec_header_t h
Definition: buffer.c:322
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:233
static char * ip_inacl_error_strings[]
Definition: ip_in_out_acl.c:78
static void * vnet_get_config_data(vnet_config_main_t *cm, u32 *config_index, u32 *next_index, u32 n_data_bytes)
Definition: config.h:123
#define PREDICT_FALSE(x)
Definition: clib.h:121
u32 node_index
Node index.
Definition: node.h:488
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1231
static uword vnet_classify_get_offset(vnet_classify_table_t *t, vnet_classify_entry_t *v)
vlib_node_registration_t ip6_inacl_node
(constructor) VLIB_REGISTER_NODE (ip6_inacl_node)
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:170
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
Definition: buffer_node.h:339
#define foreach_ip_inacl_error
Definition: ip_in_out_acl.c:59
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:115
u8 data[]
Packet data.
Definition: buffer.h:181
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
Definition: buffer.h:147
#define ARRAY_LEN(x)
Definition: clib.h:67
vlib_node_registration_t ip6_outacl_node
(constructor) VLIB_REGISTER_NODE (ip6_outacl_node)
in_out_acl_table_id_t
Definition: in_out_acl.h:29
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1580
struct _vnet_classify_main vnet_classify_main_t
Definition: vnet_classify.h:56
static u8 * format_ip_outacl_trace(u8 *s, va_list *args)
Definition: ip_in_out_acl.c:49
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:252
vlib_node_registration_t ip4_outacl_node
(constructor) VLIB_REGISTER_NODE (ip4_outacl_node)
static char * ip_outacl_error_strings[]
Definition: ip_in_out_acl.c:93
static u8 * format_ip_inacl_trace(u8 *s, va_list *args)
Definition: ip_in_out_acl.c:43
struct _vlib_node_registration vlib_node_registration_t
Definition: defs.h:47
vl_api_mac_event_action_t action
Definition: l2.api:181
vnet_classify_main_t * vnet_classify_main
Definition: in_out_acl.h:55
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1581
VLIB buffer representation.
Definition: buffer.h:102
ip_outacl_error_t
Definition: ip_in_out_acl.c:84
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:297
#define vnet_buffer(b)
Definition: buffer.h:417
in_out_acl_main_t in_out_acl_main
Definition: in_out_acl.c:21
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:634
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:280
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:302
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
static clib_error_t * ip_in_out_acl_init(vlib_main_t *vm)
vl_api_interface_index_t sw_if_index
Definition: wireguard.api:34
Definition: defs.h:46