FD.io VPP v21.01.1
Vector Packet Processing
input.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <avf/avf.h>

#define foreach_avf_input_error \
  _(BUFFER_ALLOC, "buffer alloc error")

typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;

static __clib_unused char *avf_input_error_strings[] = {
#define _(n,s) s,
  foreach_avf_input_error
#undef _
};

#define AVF_INPUT_REFILL_TRESHOLD 32
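
/* write one rx descriptor: buffer address in qword[0], status qword
 * zeroed so the DD bit reads 0 until the NIC writes the descriptor back;
 * with 256-bit vectors all four qwords go out in a single unaligned store */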
static_always_inline void
avf_rx_desc_write (avf_rx_desc_t * d, u64 addr)
{
#ifdef CLIB_HAVE_VEC256
  u64x4 v = { addr, 0, 0, 0 };
  u64x4_store_unaligned (v, (void *) d);
#else
  d->qword[0] = addr;
  d->qword[1] = 0;
#endif
}
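
/* refill the rx ring with freshly allocated buffers; refill happens only
 * once at least AVF_INPUT_REFILL_TRESHOLD slots are free, buffers are
 * written in batches of 8, and the tail register update uses a release
 * store so the NIC cannot see a descriptor before its address is written */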
static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
                int use_va_dma)
{
  u16 n_refill, mask, n_alloc, slot, size;
  vlib_buffer_t *b[8];
  avf_rx_desc_t *d, *first_d;
  void *p[8];

  size = rxq->size;
  mask = size - 1;
  n_refill = mask - rxq->n_enqueued;
  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_TRESHOLD))
    return;
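
  /* the ring is deliberately kept one slot short of full:
   * n_refill = (size - 1) - n_enqueued slots remain to be filled */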
  slot = (rxq->next - n_refill - 1) & mask;

  n_refill &= ~7;               /* round to 8 */
  n_alloc =
    vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, size, n_refill,
                                         rxq->buffer_pool_index);

  if (PREDICT_FALSE (n_alloc != n_refill))
    {
      vlib_error_count (vm, node->node_index,
                        AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
      if (n_alloc)
        vlib_buffer_free_from_ring (vm, rxq->bufs, slot, size, n_alloc);
      return;
    }

  rxq->n_enqueued += n_alloc;
  first_d = rxq->descs;

  ASSERT (slot % 8 == 0);

  while (n_alloc >= 8)
    {
      d = first_d + slot;

      if (use_va_dma)
        {
          vlib_get_buffers_with_offset (vm, rxq->bufs + slot, p, 8,
                                        sizeof (vlib_buffer_t));
          avf_rx_desc_write (d + 0, pointer_to_uword (p[0]));
          avf_rx_desc_write (d + 1, pointer_to_uword (p[1]));
          avf_rx_desc_write (d + 2, pointer_to_uword (p[2]));
          avf_rx_desc_write (d + 3, pointer_to_uword (p[3]));
          avf_rx_desc_write (d + 4, pointer_to_uword (p[4]));
          avf_rx_desc_write (d + 5, pointer_to_uword (p[5]));
          avf_rx_desc_write (d + 6, pointer_to_uword (p[6]));
          avf_rx_desc_write (d + 7, pointer_to_uword (p[7]));
        }
      else
        {
          vlib_get_buffers (vm, rxq->bufs + slot, b, 8);
          avf_rx_desc_write (d + 0, vlib_buffer_get_pa (vm, b[0]));
          avf_rx_desc_write (d + 1, vlib_buffer_get_pa (vm, b[1]));
          avf_rx_desc_write (d + 2, vlib_buffer_get_pa (vm, b[2]));
          avf_rx_desc_write (d + 3, vlib_buffer_get_pa (vm, b[3]));
          avf_rx_desc_write (d + 4, vlib_buffer_get_pa (vm, b[4]));
          avf_rx_desc_write (d + 5, vlib_buffer_get_pa (vm, b[5]));
          avf_rx_desc_write (d + 6, vlib_buffer_get_pa (vm, b[6]));
          avf_rx_desc_write (d + 7, vlib_buffer_get_pa (vm, b[7]));
        }

      /* next */
      slot = (slot + 8) & mask;
      n_alloc -= 8;
    }

  clib_atomic_store_rel_n (rxq->qrx_tail, slot);
}
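
/* attach the tail descriptors of a multi-segment packet to the head
 * buffer as a vlib buffer chain; returns the total byte count of the
 * tail segments (0 for single-segment packets) */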
static_always_inline uword
avf_rx_attach_tail (vlib_main_t * vm, vlib_buffer_t * bt, vlib_buffer_t * b,
                    u64 qw1, avf_rx_tail_t * t)
{
  vlib_buffer_t *hb = b;
  u32 tlnifb = 0, i = 0;

  if (qw1 & AVF_RXD_STATUS_EOP)
    return 0;

  while ((qw1 & AVF_RXD_STATUS_EOP) == 0)
    {
      ASSERT (i < AVF_RX_MAX_DESC_IN_CHAIN - 1);
      ASSERT (qw1 & AVF_RXD_STATUS_DD);
      qw1 = t->qw1s[i];
      b->next_buffer = t->buffers[i];
      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b = vlib_get_buffer (vm, b->next_buffer);
      vlib_buffer_copy_template (b, bt);
      tlnifb += b->current_length = qw1 >> AVF_RXD_LEN_SHIFT;
      i++;
    }

  hb->total_length_not_including_first_buffer = tlnifb;
  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return tlnifb;
}
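
/* second pass over a received burst: copy the buffer template into each
 * head buffer, set lengths from the descriptor qword[1], and attach tails
 * when the burst contained chained packets; 4 packets per iteration with
 * buffer-header prefetch 8 packets ahead */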
static_always_inline uword
avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
                      avf_per_thread_data_t * ptd, u32 n_left,
                      int maybe_multiseg)
{
  vlib_buffer_t bt;
  vlib_buffer_t **b = ptd->bufs;
  u64 *qw1 = ptd->qw1s;
  avf_rx_tail_t *tail = ptd->tails;
  uword n_rx_bytes = 0;

  /* copy template into local variable - will save per packet load */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);

  while (n_left >= 4)
    {
      if (n_left >= 12)
        {
          vlib_prefetch_buffer_header (b[8], LOAD);
          vlib_prefetch_buffer_header (b[9], LOAD);
          vlib_prefetch_buffer_header (b[10], LOAD);
          vlib_prefetch_buffer_header (b[11], LOAD);
        }

      vlib_buffer_copy_template (b[0], &bt);
      vlib_buffer_copy_template (b[1], &bt);
      vlib_buffer_copy_template (b[2], &bt);
      vlib_buffer_copy_template (b[3], &bt);

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[1]->current_length = qw1[1] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[2]->current_length = qw1[2] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[3]->current_length = qw1[3] >> AVF_RXD_LEN_SHIFT;

      if (maybe_multiseg)
        {
          n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);
          n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[1], qw1[1], tail + 1);
          n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[2], qw1[2], tail + 2);
          n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[3], qw1[3], tail + 3);
        }

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      qw1 += 4;
      tail += 4;
      b += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      vlib_buffer_copy_template (b[0], &bt);

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;

      if (maybe_multiseg)
        n_rx_bytes += avf_rx_attach_tail (vm, &bt, b[0], qw1[0], tail + 0);

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      qw1 += 1;
      tail += 1;
      b += 1;
      n_left -= 1;
    }
  return n_rx_bytes;
}
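
/* per-queue input: scan the descriptor ring for completed descriptors,
 * copy buffer indices directly into the next frame, post-process the
 * burst and hand the frame to the next node (ethernet-input unless a
 * feature arc or per-interface redirect overrides it) */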
static_always_inline uword
avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vlib_frame_t * frame, avf_device_t * ad, u16 qid)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
  u16 n_tail_desc = 0;
  u64 or_qw1 = 0;
  u32 *bi, *to_next, n_left_to_next;
  vlib_buffer_t *bt = &ptd->buffer_template;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u16 next = rxq->next;
  u16 size = rxq->size;
  u16 mask = size - 1;
  avf_rx_desc_t *d, *fd = rxq->descs;
#ifdef CLIB_HAVE_VEC256
  u64x4 q1x4, or_q1x4 = { 0 };
  u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
#endif

  /* is there anything on the ring */
  d = fd + next;
  if ((d->qword[1] & AVF_RXD_STATUS_DD) == 0)
    goto done;

  if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
    next_index = ad->per_interface_next_index;

  if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
    vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt);

  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

  /* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
     copy needed data from descriptor to rx vector */
  bi = to_next;

  while (n_rx_packets < AVF_RX_VECTOR_SZ)
    {
      if (next + 11 < size)
        {
          int stride = 8;
          CLIB_PREFETCH ((void *) (fd + (next + stride)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH ((void *) (fd + (next + stride + 1)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH ((void *) (fd + (next + stride + 2)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
          CLIB_PREFETCH ((void *) (fd + (next + stride + 3)),
                         CLIB_CACHE_LINE_BYTES, LOAD);
        }

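      /* quad fast path: gather the four status qwords at once and accept
       * all four packets only when each descriptor is complete (DD) and
       * single-segment (EOP); otherwise fall back to one-by-one */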
#ifdef CLIB_HAVE_VEC256
      if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
        goto one_by_one;

      q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
                           (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
        goto one_by_one;

      or_q1x4 |= q1x4;
      u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
      vlib_buffer_copy_indices (bi, rxq->bufs + next, 4);

      /* next */
      next = (next + 4) & mask;
      d = fd + next;
      n_rx_packets += 4;
      bi += 4;
      continue;
    one_by_one:
#endif
      CLIB_PREFETCH ((void *) (fd + ((next + 8) & mask)),
                     CLIB_CACHE_LINE_BYTES, LOAD);

      if (avf_rxd_is_not_dd (d))
        break;

      bi[0] = rxq->bufs[next];

      /* deal with chained buffers */
      if (PREDICT_FALSE (avf_rxd_is_not_eop (d)))
        {
          u16 tail_desc = 0;
          u16 tail_next = next;
          avf_rx_tail_t *tail = ptd->tails + n_rx_packets;
          avf_rx_desc_t *td;
          do
            {
              tail_next = (tail_next + 1) & mask;
              td = fd + tail_next;

              /* bail out in case of incomplete transaction */
              if (avf_rxd_is_not_dd (td))
                goto no_more_desc;

              or_qw1 |= tail->qw1s[tail_desc] = td[0].qword[1];
              tail->buffers[tail_desc] = rxq->bufs[tail_next];
              tail_desc++;
            }
          while (avf_rxd_is_not_eop (td));
          next = tail_next;
          n_tail_desc += tail_desc;
        }

      or_qw1 |= ptd->qw1s[n_rx_packets] = d[0].qword[1];

      /* next */
      next = (next + 1) & mask;
      d = fd + next;
      n_rx_packets++;
      bi++;
    }
no_more_desc:

  if (n_rx_packets == 0)
    goto done;

  rxq->next = next;
  rxq->n_enqueued -= n_rx_packets + n_tail_desc;

#ifdef CLIB_HAVE_VEC256
  or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
#endif

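  /* translate buffer indices to pointers and finish the buffer template
   * that avf_process_rx_burst copies into every head buffer */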
  vlib_get_buffers (vm, to_next, ptd->bufs, n_rx_packets);

  vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;
  bt->buffer_pool_index = rxq->buffer_pool_index;
  bt->ref_count = 1;

  if (n_tail_desc)
    n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 1);
  else
    n_rx_bytes = avf_process_rx_burst (vm, node, ptd, n_rx_packets, 0);

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets, i = 0, j;
      bi = to_next;

      while (n_trace && n_left)
        {
          vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
          if (PREDICT_TRUE
              (vlib_trace_buffer
               (vm, node, next_index, b, /* follow_chain */ 0)))
            {
              avf_input_trace_t *tr =
                vlib_add_trace (vm, node, b, sizeof (*tr));
              tr->next_index = next_index;
              tr->qid = qid;
              tr->hw_if_index = ad->hw_if_index;
              tr->qw1s[0] = ptd->qw1s[i];
              for (j = 1; j < AVF_RX_MAX_DESC_IN_CHAIN; j++)
                tr->qw1s[j] = ptd->tails[i].qw1s[j - 1];

              n_trace--;
            }

          /* next */
          n_left--;
          bi++;
          i++;
        }
      vlib_set_trace_count (vm, node, n_trace);
    }
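
  /* the frame is going straight to ethernet-input: mark it so
   * ethernet-input can take its single-interface fast path, and flag the
   * IPv4 checksum as already verified when no descriptor reported an
   * IPE error */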
  if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
    {
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
      f = vlib_get_frame (vm, nf->frame);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = ad->sw_if_index;
      ef->hw_if_index = ad->hw_if_index;

      if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
        f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
      vlib_frame_no_append (f);
    }

  n_left_to_next -= n_rx_packets;
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thr_idx,
                                   ad->hw_if_index, n_rx_packets, n_rx_bytes);

done:
  /* refill rx ring */
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
  else
    avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );

  return n_rx_packets;
}
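
/* node function: poll every (device, queue) pair assigned to this worker
 * thread, skipping devices that are not admin-up */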
VLIB_NODE_FN (avf_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                               vlib_frame_t * frame)
{
  u32 n_rx = 0;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    avf_device_t *ad;
    ad = avf_get_device (dq->dev_instance);
    if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
      continue;
    n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id);
  }
  return n_rx;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (avf_input_node) = {
  .name = "avf-input",
  .sibling_of = "device-input",
  .format_trace = format_avf_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = AVF_INPUT_N_ERROR,
  .error_strings = avf_input_error_strings,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */