input.c (FD.io VPP v18.07.1-19-g511ce25, Vector Packet Processing)
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <avf/avf.h>

#define foreach_avf_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_ERROR, "Rx packet errors")

typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;

static __clib_unused char *avf_input_error_strings[] = {
#define _(n,s) s,
  foreach_avf_input_error
#undef _
};

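/* RX descriptor write-back status bits checked below: DD (descriptor
   done) means the NIC has written the descriptor back, EOP (end of
   packet) means the descriptor completes a packet. */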
#define AVF_RX_DESC_STATUS(x) (1 << (x))
#define AVF_RX_DESC_STATUS_DD AVF_RX_DESC_STATUS(0)
#define AVF_RX_DESC_STATUS_EOP AVF_RX_DESC_STATUS(1)

#define AVF_INPUT_REFILL_TRESHOLD 32
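
/* Refill the RX ring with freshly allocated buffers and hand them to the
   NIC by bumping the queue tail register. The ring keeps at most
   size - 1 buffers enqueued; nothing is done until more than
   AVF_INPUT_REFILL_TRESHOLD slots are free. */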
static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
                int use_iova)
{
  u16 n_refill, mask, n_alloc, slot;
  u32 s0, s1, s2, s3;
  avf_rx_desc_t *d[4];

  n_refill = rxq->size - 1 - rxq->n_enqueued;
  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_TRESHOLD))
    return;

  mask = rxq->size - 1;
  slot = (rxq->next - n_refill - 1) & mask;

  n_refill &= ~7;		/* round to 8 */
  n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, rxq->size,
				       n_refill);

  if (PREDICT_FALSE (n_alloc != n_refill))
    {
      vlib_error_count (vm, node->node_index,
			AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
      if (n_alloc)
	vlib_buffer_free (vm, rxq->bufs + slot, n_alloc);
      return;
    }

  rxq->n_enqueued += n_alloc;

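  /* program buffer addresses into the free descriptors, four at a time.
     With use_iova the buffer's virtual address is written directly
     (the IOMMU translates it); otherwise the physical address is used. */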
  while (n_alloc >= 4)
    {
      if (PREDICT_TRUE (slot + 3 < rxq->size))
	{
	  s0 = slot;
	  s1 = slot + 1;
	  s2 = slot + 2;
	  s3 = slot + 3;
	}
      else
	{
	  s0 = slot;
	  s1 = (slot + 1) & mask;
	  s2 = (slot + 2) & mask;
	  s3 = (slot + 3) & mask;
	}

      d[0] = ((avf_rx_desc_t *) rxq->descs) + s0;
      d[1] = ((avf_rx_desc_t *) rxq->descs) + s1;
      d[2] = ((avf_rx_desc_t *) rxq->descs) + s2;
      d[3] = ((avf_rx_desc_t *) rxq->descs) + s3;
      if (use_iova)
	{
	  vlib_buffer_t *b;
	  b = vlib_get_buffer (vm, rxq->bufs[s0]);
	  d[0]->qword[0] = pointer_to_uword (b->data);
	  b = vlib_get_buffer (vm, rxq->bufs[s1]);
	  d[1]->qword[0] = pointer_to_uword (b->data);
	  b = vlib_get_buffer (vm, rxq->bufs[s2]);
	  d[2]->qword[0] = pointer_to_uword (b->data);
	  b = vlib_get_buffer (vm, rxq->bufs[s3]);
	  d[3]->qword[0] = pointer_to_uword (b->data);
	}
      else
	{
	  d[0]->qword[0] =
	    vlib_get_buffer_data_physical_address (vm, rxq->bufs[s0]);
	  d[1]->qword[0] =
	    vlib_get_buffer_data_physical_address (vm, rxq->bufs[s1]);
	  d[2]->qword[0] =
	    vlib_get_buffer_data_physical_address (vm, rxq->bufs[s2]);
	  d[3]->qword[0] =
	    vlib_get_buffer_data_physical_address (vm, rxq->bufs[s3]);
	}

      d[0]->qword[1] = 0;
      d[1]->qword[1] = 0;
      d[2]->qword[1] = 0;
      d[3]->qword[1] = 0;

      /* next */
      slot = (slot + 4) & mask;
      n_alloc -= 4;
    }
  while (n_alloc)
    {
      s0 = slot;
      d[0] = ((avf_rx_desc_t *) rxq->descs) + s0;
      if (use_iova)
	{
	  vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[s0]);
	  d[0]->qword[0] = pointer_to_uword (b->data);
	}
      else
	d[0]->qword[0] =
	  vlib_get_buffer_data_physical_address (vm, rxq->bufs[s0]);
      d[0]->qword[1] = 0;

      /* next */
      slot = (slot + 1) & mask;
      n_alloc -= 1;
    }

  /* ensure descriptor writes complete before the tail bump is visible */
  CLIB_MEMORY_BARRIER ();
  *(rxq->qrx_tail) = slot;
}

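/* On a descriptor RX error: charge the error counter, retract the
   packet-type buffer advance and steer the packet to the drop node. */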
static_always_inline void
avf_check_for_error (vlib_node_runtime_t * node, avf_rx_vector_entry_t * rxve,
		     vlib_buffer_t * b, u16 * next)
{
  avf_main_t *am = &avf_main;
  avf_ptype_t *ptype;
  if (PREDICT_FALSE (rxve->error))
    {
      b->error = node->errors[AVF_INPUT_ERROR_RX_PACKET_ERROR];
      ptype = am->ptypes + rxve->ptype;
      /* retract the advance applied in avf_find_next */
      vlib_buffer_advance (b, -ptype->buffer_advance);
      *next = VNET_DEVICE_INPUT_NEXT_DROP;
    }
}

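/* Map the descriptor packet type to a next node via the ptype table,
   advancing the buffer past the headers the table accounts for. Tagged
   ethernet frames always go to ethernet-input. */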
static_always_inline u32
avf_find_next (avf_rx_vector_entry_t * rxve, vlib_buffer_t * b,
	       int maybe_tagged)
{
  avf_main_t *am = &avf_main;
  ethernet_header_t *e = (ethernet_header_t *) b->data;
  avf_ptype_t *ptype;
  if (maybe_tagged && ethernet_frame_is_tagged (e->type))
    return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  ptype = am->ptypes + rxve->ptype;
  vlib_buffer_advance (b, ptype->buffer_advance);
  b->flags |= ptype->flags;
  return ptype->next_node;
}

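/* Fill buffer metadata for a vector of received packets from the rx
   vector entries, resolve next nodes (unless known_next says they are
   all the same) and return the total byte count. */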
static_always_inline uword
avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_buffer_t * bt, avf_rx_vector_entry_t * rxve,
		      vlib_buffer_t ** b, u16 * next, u32 n_rxv,
		      u8 maybe_error, int known_next)
{
  uword n_rx_bytes = 0;

  while (n_rxv >= 4)
    {
      if (n_rxv >= 12)
	{
	  vlib_prefetch_buffer_header (b[8], LOAD);
	  vlib_prefetch_buffer_header (b[9], LOAD);
	  vlib_prefetch_buffer_header (b[10], LOAD);
	  vlib_prefetch_buffer_header (b[11], LOAD);
	  if (!known_next)
	    {
	      CLIB_PREFETCH (b[8]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (b[9]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (b[10]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (b[11]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	    }
	}

      n_rx_bytes += b[0]->current_length = rxve[0].length;
      n_rx_bytes += b[1]->current_length = rxve[1].length;
      n_rx_bytes += b[2]->current_length = rxve[2].length;
      n_rx_bytes += b[3]->current_length = rxve[3].length;

      if (!known_next)
	{
	  ethernet_header_t *e0, *e1, *e2, *e3;

	  e0 = (ethernet_header_t *) b[0]->data;
	  e1 = (ethernet_header_t *) b[1]->data;
	  e2 = (ethernet_header_t *) b[2]->data;
	  e3 = (ethernet_header_t *) b[3]->data;

	  if (ethernet_frame_is_any_tagged_x4 (e0->type, e1->type,
					       e2->type, e3->type))
	    {
	      next[0] = avf_find_next (rxve, b[0], 1);
	      next[1] = avf_find_next (rxve + 1, b[1], 1);
	      next[2] = avf_find_next (rxve + 2, b[2], 1);
	      next[3] = avf_find_next (rxve + 3, b[3], 1);
	    }
	  else
	    {
	      next[0] = avf_find_next (rxve, b[0], 0);
	      next[1] = avf_find_next (rxve + 1, b[1], 0);
	      next[2] = avf_find_next (rxve + 2, b[2], 0);
	      next[3] = avf_find_next (rxve + 3, b[3], 0);
	    }

	  if (PREDICT_FALSE (maybe_error))
	    {
	      avf_check_for_error (node, rxve + 0, b[0], next);
	      avf_check_for_error (node, rxve + 1, b[1], next + 1);
	      avf_check_for_error (node, rxve + 2, b[2], next + 2);
	      avf_check_for_error (node, rxve + 3, b[3], next + 3);
	    }
	}
      else if (bt->current_config_index)
	{
	  b[0]->current_config_index = bt->current_config_index;
	  b[1]->current_config_index = bt->current_config_index;
	  b[2]->current_config_index = bt->current_config_index;
	  b[3]->current_config_index = bt->current_config_index;
	  vnet_buffer (b[0])->feature_arc_index =
	    vnet_buffer (bt)->feature_arc_index;
	  vnet_buffer (b[1])->feature_arc_index =
	    vnet_buffer (bt)->feature_arc_index;
	  vnet_buffer (b[2])->feature_arc_index =
	    vnet_buffer (bt)->feature_arc_index;
	  vnet_buffer (b[3])->feature_arc_index =
	    vnet_buffer (bt)->feature_arc_index;
	}

      clib_memcpy (vnet_buffer (b[0])->sw_if_index,
		   vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy (vnet_buffer (b[1])->sw_if_index,
		   vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy (vnet_buffer (b[2])->sw_if_index,
		   vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy (vnet_buffer (b[3])->sw_if_index,
		   vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      rxve += 4;
      b += 4;
      next += 4;
      n_rxv -= 4;
    }
  while (n_rxv)
    {
      b[0]->current_length = rxve->length;
      n_rx_bytes += b[0]->current_length;

      if (!known_next)
	{
	  next[0] = avf_find_next (rxve, b[0], 1);
	  avf_check_for_error (node, rxve + 0, b[0], next);
	}
      else if (bt->current_config_index)
	{
	  b[0]->current_config_index = bt->current_config_index;
	  vnet_buffer (b[0])->feature_arc_index =
	    vnet_buffer (bt)->feature_arc_index;
	}

      clib_memcpy (vnet_buffer (b[0])->sw_if_index,
		   vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      rxve += 1;
      b += 1;
      next += 1;
      n_rxv -= 1;
    }
  return n_rx_bytes;
}

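/* Per-queue device input: drain completed descriptors into the
   per-thread rx vector, refill the ring, resolve next nodes, handle
   tracing and enqueue the burst. */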
static_always_inline uword
avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame, avf_device_t * ad, u16 qid)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  avf_rx_vector_entry_t *rxve = 0;
  uword n_trace;
  avf_rx_desc_t *d;
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u16 mask = rxq->size - 1;
  u16 n_rxv = 0;
  u8 maybe_error = 0;
  u32 buffer_indices[AVF_RX_VECTOR_SZ], *bi;
  u16 nexts[AVF_RX_VECTOR_SZ], *next;
  vlib_buffer_t *bufs[AVF_RX_VECTOR_SZ];
  vlib_buffer_t *bt = &ptd->buffer_template;
  int known_next = 0;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  STATIC_ASSERT_SIZEOF (avf_rx_vector_entry_t, 8);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, status, 0);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, length, 4);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, ptype, 6);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, error, 7);

  /* fetch up to AVF_RX_VECTOR_SZ completed descriptors from the rx ring
     and copy the needed fields into the rx vector */
  d = rxq->descs + rxq->next;
  bi = buffer_indices;
  while (n_rxv < AVF_RX_VECTOR_SZ)
    {
      if (rxq->next + 11 < rxq->size)
	{
	  int stride = 8;
	  CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride + 1)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride + 2)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride + 3)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	}

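      /* AVX2 fast path: load the status qword of four descriptors at
	 once and take them only if all four have DD and EOP set */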
#ifdef CLIB_HAVE_VEC256
      u64x4 q1x4, v, err4;
      u64x4 status_dd_eop_mask = u64x4_splat (0x3);

      if (n_rxv >= AVF_RX_VECTOR_SZ - 4)
	goto one_by_one;

      if (rxq->next >= rxq->size - 4)
	goto one_by_one;

      /* load qword 1 of 4 descriptors into 256-bit vector register */
      /* *INDENT-OFF* */
      q1x4 = (u64x4) {
	  d[0].qword[1],
	  d[1].qword[1],
	  d[2].qword[1],
	  d[3].qword[1]
      };
      /* *INDENT-ON* */

      /* not all packets are ready or at least one of them is chained */
      if (!u64x4_is_equal (q1x4 & status_dd_eop_mask, status_dd_eop_mask))
	goto one_by_one;

      /* shift and mask status, length, ptype and err */
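      /* each lane is repacked to match the avf_rx_vector_entry_t layout:
	 status stays in the low dword, length (qword bits 38 and up)
	 moves to bits 32-47, ptype (bits 30-37) to bits 48-55 and error
	 (bits 19-26) to bits 56-63 */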
      v = q1x4 & u64x4_splat ((u64) 0x3FFFFULL);
      v |= (q1x4 >> 6) & u64x4_splat ((u64) 0xFFFF << 32);
      v |= (q1x4 << 18) & u64x4_splat ((u64) 0xFF << 48);
      v |= err4 = (q1x4 << 37) & u64x4_splat ((u64) 0xFF << 56);

      u64x4_store_unaligned (v, ptd->rx_vector + n_rxv);
      maybe_error |= !u64x4_is_all_zero (err4);

      clib_memcpy (bi, rxq->bufs + rxq->next, 4 * sizeof (u32));

      /* next */
      rxq->next = (rxq->next + 4) & mask;
      d = rxq->descs + rxq->next;
      n_rxv += 4;
      rxq->n_enqueued -= 4;
      bi += 4;
      continue;
    one_by_one:
#endif
      CLIB_PREFETCH ((void *) (rxq->descs + ((rxq->next + 8) & mask)),
		     CLIB_CACHE_LINE_BYTES, LOAD);
      if ((d->qword[1] & AVF_RX_DESC_STATUS_DD) == 0)
	break;
      rxve = ptd->rx_vector + n_rxv;
      bi[0] = rxq->bufs[rxq->next];
      rxve->status = avf_get_u64_bits ((void *) d, 8, 18, 0);
      rxve->error = avf_get_u64_bits ((void *) d, 8, 26, 19);
      rxve->ptype = avf_get_u64_bits ((void *) d, 8, 37, 30);
      rxve->length = avf_get_u64_bits ((void *) d, 8, 63, 38);
      maybe_error |= rxve->error;

      /* deal with chained buffers: multi-segment (non-EOP) packets are
	 not yet supported */
      while (PREDICT_FALSE ((d->qword[1] & AVF_RX_DESC_STATUS_EOP) == 0))
	{
	  clib_error ("fixme");
	}

      /* next */
      rxq->next = (rxq->next + 1) & mask;
      d = rxq->descs + rxq->next;
      n_rxv++;
      rxq->n_enqueued--;
      bi++;
    }

  if (n_rxv == 0)
    goto done;

  /* refill rx ring */
  if (ad->flags & AVF_DEVICE_F_IOVA)
    avf_rxq_refill (vm, node, rxq, 1 /* use_iova */ );
  else
    avf_rxq_refill (vm, node, rxq, 0 /* use_iova */ );

  vlib_get_buffers (vm, buffer_indices, bufs, n_rxv);
  n_rx_packets = n_rxv;

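  /* the buffer template provides per-burst metadata (interface indices,
     feature arc) that is copied into each received buffer */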
  vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;

  /* redirect the whole burst if a per-interface next index is set */
  if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
    {
      known_next = 1;
      next_index = ad->per_interface_next_index;
    }

  /* as all packets belong to the same interface, the feature arc lookup
     can be done once and the result stored */
  if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
    {
      vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt);
      known_next = 1;
    }

  if (known_next)
    {
      clib_memset_u16 (nexts, next_index, n_rxv);
      n_rx_bytes = avf_process_rx_burst (vm, node, bt, ptd->rx_vector, bufs,
					 nexts, n_rxv, maybe_error, 1);
      vnet_buffer (bt)->feature_arc_index = 0;
      bt->current_config_index = 0;
    }
  else
    n_rx_bytes = avf_process_rx_burst (vm, node, bt, ptd->rx_vector, bufs,
				       nexts, n_rxv, maybe_error, 0);

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      bi = buffer_indices;
      next = nexts;
      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  avf_input_trace_t *tr;
	  b = vlib_get_buffer (vm, bi[0]);
	  vlib_trace_buffer (vm, node, next[0], b, /* follow_chain */ 0);
	  tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	  tr->next_index = next[0];
	  tr->hw_if_index = ad->hw_if_index;
	  /* copy the rx vector entry that belongs to this buffer */
	  clib_memcpy (&tr->rxve, ptd->rx_vector + (bi - buffer_indices),
		       sizeof (avf_rx_vector_entry_t));

	  /* next */
	  n_trace--;
	  n_left--;
	  bi++;
	  next++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }
  vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_rx_packets);
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thr_idx,
				   ad->hw_if_index, n_rx_packets, n_rx_bytes);

done:
  return n_rx_packets;
}

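/* Input node function: poll each device/queue pair assigned to this
   thread and return the number of packets received. */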
VLIB_NODE_FN (avf_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  u32 n_rx = 0;
  avf_main_t *am = &avf_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
    {
      avf_device_t *ad;
      ad = vec_elt_at_index (am->devices, dq->dev_instance);
      if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
	continue;
      n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id);
    }
  return n_rx;
}

#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (avf_input_node) = {
  .name = "avf-input",
  .sibling_of = "device-input",
  .format_trace = format_avf_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = AVF_INPUT_N_ERROR,
  .error_strings = avf_input_error_strings,
};
/* *INDENT-ON* */
#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */