FD.io VPP  v18.04-17-g3a0d853
Vector Packet Processing
input.c
Go to the documentation of this file.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2018 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vlib/unix/unix.h>
20 #include <vlib/pci/pci.h>
21 #include <vnet/ethernet/ethernet.h>
22 #include <vnet/devices/devices.h>
23 
24 #include <avf/avf.h>
25 
26 #define foreach_avf_input_error \
27  _(BUFFER_ALLOC, "buffer alloc error") \
28  _(RX_PACKET_ERROR, "Rx packet errors")
29 
30 typedef enum
31 {
32 #define _(f,s) AVF_INPUT_ERROR_##f,
34 #undef _
37 
38 static __clib_unused char *avf_input_error_strings[] = {
39 #define _(n,s) s,
41 #undef _
42 };
43 
/* Status bits in rx descriptor qword[1]: DD = descriptor done,
 * EOP = end of packet.  Argument parenthesized for macro hygiene. */
#define AVF_RX_DESC_STATUS(x) (1 << (x))
#define AVF_RX_DESC_STATUS_DD AVF_RX_DESC_STATUS(0)
#define AVF_RX_DESC_STATUS_EOP AVF_RX_DESC_STATUS(1)
47 
50  vlib_buffer_t * b0, uword * n_trace, avf_device_t * ad,
51  avf_rx_vector_entry_t * rxve)
52 {
54  vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
55  vlib_set_trace_count (vm, node, --(*n_trace));
56  tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
57  tr->next_index = next0;
58  tr->hw_if_index = ad->hw_if_index;
59  clib_memcpy (&tr->rxve, rxve, sizeof (avf_rx_vector_entry_t));
60 }
61 
62 #define AVF_INPUT_REFILL_TRESHOLD 32
65  int use_iova)
66 {
67  u16 n_refill, mask, n_alloc, slot;
68  avf_rx_desc_t *d;
69 
70  n_refill = rxq->size - 1 - rxq->n_bufs;
71  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_TRESHOLD))
72  return;
73 
74  mask = rxq->size - 1;
75  slot = (rxq->next - n_refill - 1) & mask;
76 
77  n_refill &= ~7; /* round to 8 */
78  n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, rxq->size,
79  n_refill);
80 
81  if (PREDICT_FALSE (n_alloc != n_refill))
82  {
83  vlib_error_count (vm, node->node_index,
84  AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
85  if (n_alloc)
86  vlib_buffer_free (vm, rxq->bufs + slot, n_alloc);
87  return;
88  }
89 
90  rxq->n_bufs += n_alloc;
91 
92  while (n_alloc--)
93  {
94  u64 addr;
95  d = ((avf_rx_desc_t *) rxq->descs) + slot;
96  if (use_iova)
97  {
98  vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[slot]);
99  addr = pointer_to_uword (b->data);
100  }
101  else
102  addr = vlib_get_buffer_data_physical_address (vm, rxq->bufs[slot]);
103  d->qword[0] = addr;
104  d->qword[1] = 0;
105  slot = (slot + 1) & mask;
106  }
107 
109  *(rxq->qrx_tail) = slot;
110 }
111 
114  vlib_buffer_t * b, u32 * next)
115 {
116  avf_main_t *am = &avf_main;
117  avf_ptype_t *ptype;
118  if (PREDICT_FALSE (rxve->error))
119  {
120  b->error = node->errors[AVF_INPUT_ERROR_RX_PACKET_ERROR];
121  ptype = am->ptypes + rxve->ptype;
122  /* retract */
123  vlib_buffer_advance (b, --ptype->buffer_advance);
125  }
126 }
127 
130  int maybe_tagged)
131 {
132  avf_main_t *am = &avf_main;
134  avf_ptype_t *ptype;
135  if (maybe_tagged && ethernet_frame_is_tagged (e->type))
137  ptype = am->ptypes + rxve->ptype;
138  vlib_buffer_advance (b, ptype->buffer_advance);
139  b->flags |= ptype->flags;
140  return ptype->next_node;
141 }
142 
143 
146  vlib_frame_t * frame, avf_device_t * ad, u16 qid,
147  int with_features_or_tracing)
148 {
149  avf_main_t *am = &avf_main;
150  vnet_main_t *vnm = vnet_get_main ();
151  u32 thr_idx = vlib_get_thread_index ();
152  avf_per_thread_data_t *ptd =
153  vec_elt_at_index (am->per_thread_data, thr_idx);
154  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
155  avf_rx_vector_entry_t *rxve;
156  uword n_trace = vlib_get_trace_count (vm, node);
158  avf_rx_desc_t *d;
159  u32 *to_next = 0;
160  u32 n_rx_packets = 0;
161  u32 n_rx_bytes = 0;
162  u32 sw_if_idx[VLIB_N_RX_TX] = {[VLIB_RX] = ad->sw_if_index,[VLIB_TX] = ~0 };
163  u16 mask = rxq->size - 1;
164  u16 n_rxv = 0;
165  u8 maybe_error = 0;
166 
167  /* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
168  copy needed data from descriptor to rx vector */
169  d = rxq->descs + rxq->next;
170  while ((d->qword[1] & AVF_RX_DESC_STATUS_DD) && n_rxv < AVF_RX_VECTOR_SZ)
171  {
172  u16 next_pf = (rxq->next + 8) & mask;
173  CLIB_PREFETCH (rxq->descs + next_pf, CLIB_CACHE_LINE_BYTES, LOAD);
174  rxve = ptd->rx_vector + n_rxv;
175  rxve->bi = rxq->bufs[rxq->next];
176  rxve->status = avf_get_u64_bits (d, 8, 18, 0);
177  rxve->error = avf_get_u64_bits (d, 8, 26, 19);
178  rxve->ptype = avf_get_u64_bits (d, 8, 37, 30);
179  rxve->length = avf_get_u64_bits (d, 8, 63, 38);
180  maybe_error |= rxve->error;
181 
182  /* deal with chained buffers */
183  while (PREDICT_FALSE ((d->qword[1] & AVF_RX_DESC_STATUS_EOP) == 0))
184  {
185  clib_error ("fixme");
186  }
187 
188  /* next */
189  rxq->next = (rxq->next + 1) & mask;
190  d = rxq->descs + rxq->next;
191  n_rxv++;
192  rxq->n_bufs--;
193  }
194 
195  if (n_rxv == 0)
196  return 0;
197 
198  /* refill rx ring */
199  if (ad->flags & AVF_DEVICE_F_IOVA)
200  avf_rxq_refill (vm, node, rxq, 1 /* use_iova */ );
201  else
202  avf_rxq_refill (vm, node, rxq, 0 /* use_iova */ );
203 
204  n_rx_packets = n_rxv;
205  rxve = ptd->rx_vector;
206  while (n_rxv)
207  {
208  u32 n_left_to_next;
209  u32 bi0, bi1, bi2, bi3;
210  vlib_buffer_t *b0, *b1, *b2, *b3;
211  u32 next0, next1, next2, next3;
212 
213  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
214 
215  while (n_rxv >= 12 && n_left_to_next >= 4)
216  {
217  vlib_buffer_t *p;
218  p = vlib_get_buffer (vm, rxve[8].bi);
219  vlib_prefetch_buffer_header (p, LOAD);
221 
222  p = vlib_get_buffer (vm, rxve[9].bi);
223  vlib_prefetch_buffer_header (p, LOAD);
225 
226  p = vlib_get_buffer (vm, rxve[10].bi);
227  vlib_prefetch_buffer_header (p, LOAD);
229 
230  p = vlib_get_buffer (vm, rxve[11].bi);
231  vlib_prefetch_buffer_header (p, LOAD);
233 
234  to_next[0] = bi0 = rxve[0].bi;
235  to_next[1] = bi1 = rxve[1].bi;
236  to_next[2] = bi2 = rxve[2].bi;
237  to_next[3] = bi3 = rxve[3].bi;
238 
239  b0 = vlib_get_buffer (vm, bi0);
240  b1 = vlib_get_buffer (vm, bi1);
241  b2 = vlib_get_buffer (vm, bi2);
242  b3 = vlib_get_buffer (vm, bi3);
243 
244  b0->current_length = rxve[0].length;
245  b1->current_length = rxve[1].length;
246  b2->current_length = rxve[2].length;
247  b3->current_length = rxve[3].length;
248 
249  n_rx_bytes += b0->current_length;
250  n_rx_bytes += b1->current_length;
251  n_rx_bytes += b2->current_length;
252  n_rx_bytes += b3->current_length;
253 
254  if (PREDICT_TRUE (ad->per_interface_next_index == ~0))
255  {
256  ethernet_header_t *e0, *e1, *e2, *e3;
257 
258  e0 = (ethernet_header_t *) b0->data;
259  e1 = (ethernet_header_t *) b1->data;
260  e2 = (ethernet_header_t *) b2->data;
261  e3 = (ethernet_header_t *) b3->data;
262 
264  e2->type, e3->type))
265  {
266  next0 = avf_find_next (rxve, b0, 1);
267  next1 = avf_find_next (rxve + 1, b1, 1);
268  next2 = avf_find_next (rxve + 2, b2, 1);
269  next3 = avf_find_next (rxve + 3, b3, 1);
270  }
271  else
272  {
273  next0 = avf_find_next (rxve, b0, 0);
274  next1 = avf_find_next (rxve + 1, b1, 0);
275  next2 = avf_find_next (rxve + 2, b2, 0);
276  next3 = avf_find_next (rxve + 3, b3, 0);
277  }
278 
279  if (with_features_or_tracing)
281  &next1, &next2, &next3,
282  b0, b1, b2, b3);
283 
284  if (PREDICT_FALSE (maybe_error))
285  {
286  avf_check_for_error (node, rxve + 0, b0, &next0);
287  avf_check_for_error (node, rxve + 1, b1, &next1);
288  avf_check_for_error (node, rxve + 2, b2, &next2);
289  avf_check_for_error (node, rxve + 3, b3, &next3);
290  }
291  }
292  else
293  next0 = next1 = next2 = next3 = ad->per_interface_next_index;
294 
295  clib_memcpy (vnet_buffer (b0)->sw_if_index, sw_if_idx,
296  sizeof (sw_if_idx));
297  clib_memcpy (vnet_buffer (b1)->sw_if_index, sw_if_idx,
298  sizeof (sw_if_idx));
299  clib_memcpy (vnet_buffer (b2)->sw_if_index, sw_if_idx,
300  sizeof (sw_if_idx));
301  clib_memcpy (vnet_buffer (b3)->sw_if_index, sw_if_idx,
302  sizeof (sw_if_idx));
303 
308 
309  if (with_features_or_tracing && PREDICT_FALSE (n_trace))
310  {
311  avf_input_trace (vm, node, next0, b0, &n_trace, ad, rxve);
312  if (n_trace)
313  avf_input_trace (vm, node, next1, b1, &n_trace, ad, rxve + 1);
314  if (n_trace)
315  avf_input_trace (vm, node, next2, b2, &n_trace, ad, rxve + 2);
316  if (n_trace)
317  avf_input_trace (vm, node, next3, b3, &n_trace, ad, rxve + 3);
318  }
319 
320  /* next */
321  to_next += 4;
322  n_left_to_next -= 4;
323  rxve += 4;
324  n_rxv -= 4;
325 
326  /* enqueue */
327  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
328  n_left_to_next, bi0, bi1, bi2, bi3,
329  next0, next1, next2, next3);
330  }
331  while (n_rxv && n_left_to_next)
332  {
333  bi0 = rxve[0].bi;
334  to_next[0] = bi0;
335  b0 = vlib_get_buffer (vm, bi0);
336 
337  b0->current_length = rxve->length;
338  n_rx_bytes += b0->current_length;
339 
340  if (PREDICT_TRUE (ad->per_interface_next_index == ~0))
341  {
342  next0 = avf_find_next (rxve, b0, 1);
343  if (with_features_or_tracing)
345  b0);
346  avf_check_for_error (node, rxve + 0, b0, &next0);
347  }
348  else
349  next0 = ad->per_interface_next_index;
350 
351  clib_memcpy (vnet_buffer (b0)->sw_if_index, sw_if_idx,
352  sizeof (sw_if_idx));
353 
355  if (with_features_or_tracing && PREDICT_FALSE (n_trace > 0))
356  avf_input_trace (vm, node, next0, b0, &n_trace, ad, rxve);
357 
358  /* next */
359  to_next += 1;
360  n_left_to_next -= 1;
361  rxve += 1;
362  n_rxv -= 1;
363 
364  /* enqueue */
365  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
366  n_left_to_next, bi0, next0);
367  }
368  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
369  }
370 
372  + VNET_INTERFACE_COUNTER_RX, thr_idx,
373  ad->hw_if_index, n_rx_packets, n_rx_bytes);
374 
375  return n_rx_packets;
376 }
377 
378 uword
380  vlib_frame_t * frame)
381 {
382  u32 n_rx = 0;
383  avf_main_t *am = &avf_main;
384  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
386 
388  {
389  avf_device_t *ad;
390  ad = vec_elt_at_index (am->devices, dq->dev_instance);
391  if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
392  continue;
394  vlib_get_trace_count (vm, node))
395  n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id, 1);
396  else
397  n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id, 0);
398  }
399  return n_rx;
400 }
401 
402 #ifndef CLIB_MULTIARCH_VARIANT
403 /* *INDENT-OFF* */
405  .function = avf_input,
406  .name = "avf-input",
407  .sibling_of = "device-input",
408  .format_trace = format_avf_input_trace,
409  .type = VLIB_NODE_TYPE_INPUT,
410  .state = VLIB_NODE_STATE_DISABLED,
411  .n_errors = AVF_INPUT_N_ERROR,
412  .error_strings = avf_input_error_strings,
413 };
414 
415 #if __x86_64__
418 static void __clib_constructor
420 {
421  if (avf_input_avx512 && clib_cpu_supports_avx512f ())
422  avf_input_node.function = avf_input_avx512;
423  else if (avf_input_avx2 && clib_cpu_supports_avx2 ())
424  avf_input_node.function = avf_input_avx2;
425 }
426 
427 #endif
428 #endif
429 
430 /* *INDENT-ON* */
431 
432 
433 /*
434  * fd.io coding-style-patch-verification: ON
435  *
436  * Local Variables:
437  * eval: (c-set-style "gnu")
438  * End:
439  */
static_always_inline int ethernet_frame_is_any_tagged_x4(u16 type0, u16 type1, u16 type2, u16 type3)
Definition: ethernet.h:111
static_always_inline void avf_check_for_error(vlib_node_runtime_t *node, avf_rx_vector_entry_t *rxve, vlib_buffer_t *b, u32 *next)
Definition: input.c:113
u32 hw_if_index
Definition: avf.h:85
vnet_device_and_queue_t * devices_and_queues
Definition: devices.h:69
#define foreach_avf_input_error
Definition: input.c:26
static u64 avf_get_u64_bits(void *start, int offset, int first, int last)
Definition: avf.h:217
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:143
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:211
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:391
vlib_node_function_t __clib_weak avf_input_avx512
Definition: input.c:416
avf_ptype_t * ptypes
Definition: avf.h:166
vnet_main_t * vnet_get_main(void)
Definition: misc.c:47
vnet_interface_main_t interface_main
Definition: vnet.h:56
#define AVF_RX_DESC_STATUS_EOP
Definition: input.c:46
#define PREDICT_TRUE(x)
Definition: clib.h:106
#define clib_error(format, args...)
Definition: error.h:62
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
static_always_inline void vnet_feature_start_device_input_x4(u32 sw_if_index, u32 *next0, u32 *next1, u32 *next2, u32 *next3, vlib_buffer_t *b0, vlib_buffer_t *b1, vlib_buffer_t *b2, vlib_buffer_t *b3)
Definition: feature.h:309
avf_input_error_t
Definition: input.c:30
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
Definition: buffer_node.h:138
u32 dev_instance
Definition: avf.h:83
u32 next_index
Definition: avf.h:254
avf_device_t * devices
Definition: avf.h:160
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:415
format_function_t format_avf_input_trace
Definition: avf.h:191
static u64 vlib_get_buffer_data_physical_address(vlib_main_t *vm, u32 buffer_index)
Definition: buffer_funcs.h:163
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:104
uword CLIB_MULTIARCH_FN() avf_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: input.c:379
#define static_always_inline
Definition: clib.h:93
vlib_node_registration_t avf_input_node
(constructor) VLIB_REGISTER_NODE (avf_input_node)
Definition: input.c:404
avf_rx_vector_entry_t rxve
Definition: avf.h:256
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:718
static_always_inline int vnet_device_input_have_features(u32 sw_if_index)
Definition: feature.h:227
volatile u32 * qrx_tail
Definition: avf.h:57
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:191
#define AVF_INPUT_REFILL_TRESHOLD
Definition: input.c:62
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
unsigned long u64
Definition: types.h:89
u32 status
Definition: avf.h:124
static uword pointer_to_uword(const void *p)
Definition: types.h:131
u32 hw_if_index
Definition: avf.h:255
i8 buffer_advance
Definition: avf.h:152
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:108
u8 ptype
Definition: avf.h:127
static void __clib_constructor avf_input_multiarch_select(void)
Definition: input.c:419
u64 qword[4]
Definition: avf.h:38
#define PREDICT_FALSE(x)
Definition: clib.h:105
#define AVF_RX_VECTOR_SZ
Definition: avf.h:133
u32 node_index
Node index.
Definition: node.h:437
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:364
uword( vlib_node_function_t)(struct vlib_main_t *vm, struct vlib_node_runtime_t *node, struct vlib_frame_t *frame)
Definition: node.h:54
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:130
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:143
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:221
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:74
vlib_main_t * vm
Definition: buffer.c:294
Definition: avf.h:121
Definition: avf.h:54
#define clib_memcpy(a, b, c)
Definition: string.h:75
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:454
u32 per_interface_next_index
Definition: avf.h:81
u32 bi
Definition: avf.h:123
unsigned int u32
Definition: types.h:88
u32 flags
Definition: avf.h:80
avf_rx_vector_entry_t rx_vector[AVF_RX_VECTOR_SZ]
Definition: avf.h:145
u8 error
Definition: avf.h:128
u32 * bufs
Definition: avf.h:61
static_always_inline int ethernet_frame_is_tagged(u16 type)
Definition: ethernet.h:85
static_always_inline uword avf_device_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, avf_device_t *ad, u16 qid, int with_features_or_tracing)
Definition: input.c:145
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:222
avf_main_t avf_main
Definition: device.c:36
u64 uword
Definition: types.h:112
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
static_always_inline void avf_rxq_refill(vlib_main_t *vm, vlib_node_runtime_t *node, avf_rxq_t *rxq, int use_iova)
Definition: input.c:64
#define foreach_device_and_queue(var, vec)
Definition: devices.h:156
Definition: defs.h:47
static u32 vlib_buffer_alloc_to_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Allocate buffers into ring.
Definition: buffer_funcs.h:364
unsigned short u16
Definition: types.h:57
unsigned char u8
Definition: types.h:56
vlib_node_function_t __clib_weak avf_input_avx2
Definition: input.c:417
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
Definition: buffer.h:553
u16 size
Definition: avf.h:59
avf_rxq_t * rxqs
Definition: avf.h:90
#define vnet_buffer(b)
Definition: buffer.h:372
avf_per_thread_data_t * per_thread_data
Definition: avf.h:161
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:234
static_always_inline u32 avf_find_next(avf_rx_vector_entry_t *rxve, vlib_buffer_t *b, int maybe_tagged)
Definition: input.c:129
u8 data[0]
Packet data.
Definition: buffer.h:179
u32 sw_if_index
Definition: avf.h:84
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:109
static_always_inline void avf_input_trace(vlib_main_t *vm, vlib_node_runtime_t *node, u32 next0, vlib_buffer_t *b0, uword *n_trace, avf_device_t *ad, avf_rx_vector_entry_t *rxve)
Definition: input.c:49
vhost_vring_addr_t addr
Definition: vhost-user.h:83
#define AVF_RX_DESC_STATUS_DD
Definition: input.c:45
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:159
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:111
static __clib_unused char * avf_input_error_strings[]
Definition: input.c:38
u16 next
Definition: avf.h:58
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:57
avf_rx_desc_t * descs
Definition: avf.h:60
u16 length
Definition: avf.h:125
Definition: defs.h:46
u16 n_bufs
Definition: avf.h:62
#define CLIB_MULTIARCH_FN(fn)
Definition: cpu.h:59