FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
node.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vnet/vnet.h>
16 #include <vppinfra/vec.h>
17 #include <vppinfra/error.h>
18 #include <vppinfra/format.h>
19 #include <vppinfra/xxhash.h>
20 
21 #include <vnet/ethernet/ethernet.h>
22 #include <dpdk/buffer.h>
23 #include <dpdk/device/dpdk.h>
25 #include <vnet/mpls/packet.h>
26 #include <vnet/handoff.h>
27 #include <vnet/devices/devices.h>
28 #include <vnet/feature/feature.h>
29 
30 #include <dpdk/device/dpdk_priv.h>
31 
32 static char *dpdk_error_strings[] = {
33 #define _(n,s) s,
35 #undef _
36 };
37 
38 /* make sure all flags we need are stored in lower 8 bits */
39 STATIC_ASSERT ((PKT_RX_IP_CKSUM_BAD | PKT_RX_FDIR) <
40  256, "dpdk flags not un lower byte, fix needed");
41 
44  struct rte_mbuf *mb, vlib_buffer_t * bt)
45 {
46  u8 nb_seg = 1;
47  struct rte_mbuf *mb_seg = 0;
48  vlib_buffer_t *b_seg, *b_chain = 0;
49  mb_seg = mb->next;
50  b_chain = b;
51 
52  if (mb->nb_segs < 2)
53  return 0;
54 
55  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
57 
58  while (nb_seg < mb->nb_segs)
59  {
60  ASSERT (mb_seg != 0);
61 
62  b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
63  vlib_buffer_copy_template (b_seg, bt);
64 
65  /*
66  * The driver (e.g. virtio) may not put the packet data at the start
67  * of the segment, so don't assume b_seg->current_data == 0 is correct.
68  */
69  b_seg->current_data =
70  (mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;
71 
72  b_seg->current_length = mb_seg->data_len;
73  b->total_length_not_including_first_buffer += mb_seg->data_len;
74 
75  b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
76  b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
77 
78  b_chain = b_seg;
79  mb_seg = mb_seg->next;
80  nb_seg++;
81  }
83 }
84 
86 dpdk_prefetch_mbuf_x4 (struct rte_mbuf *mb[])
87 {
88  CLIB_PREFETCH (mb[0], CLIB_CACHE_LINE_BYTES, LOAD);
89  CLIB_PREFETCH (mb[1], CLIB_CACHE_LINE_BYTES, LOAD);
90  CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, LOAD);
91  CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, LOAD);
92 }
93 
95 dpdk_prefetch_buffer_x4 (struct rte_mbuf *mb[])
96 {
97  vlib_buffer_t *b;
98  b = vlib_buffer_from_rte_mbuf (mb[0]);
100  b = vlib_buffer_from_rte_mbuf (mb[1]);
102  b = vlib_buffer_from_rte_mbuf (mb[2]);
104  b = vlib_buffer_from_rte_mbuf (mb[3]);
106 }
107 
108 /** \brief Main DPDK input node
109  @node dpdk-input
110 
111  This is the main DPDK input node: across each assigned interface,
112  call rte_eth_rx_burst(...) or similar to obtain a vector of
113  packets to process. Derive @c vlib_buffer_t metadata from
 114  <code>struct rte_mbuf</code> metadata.
 115  Depending on the resulting metadata: adjust <code>b->current_data,
116  b->current_length </code> and dispatch directly to
117  ip4-input-no-checksum, or ip6-input. Trace the packet if required.
118 
119  @param vm vlib_main_t corresponding to the current thread
120  @param node vlib_node_runtime_t
121  @param f vlib_frame_t input-node, not used.
122 
123  @par Graph mechanics: buffer metadata, next index usage
124 
125  @em Uses:
126  - <code>struct rte_mbuf mb->ol_flags</code>
127  - PKT_RX_IP_CKSUM_BAD
128 
129  @em Sets:
130  - <code>b->error</code> if the packet is to be dropped immediately
131  - <code>b->current_data, b->current_length</code>
132  - adjusted as needed to skip the L2 header in direct-dispatch cases
133  - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
134  - rx interface sw_if_index
135  - <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
136  - required by ipX-lookup
137  - <code>b->flags</code>
138  - to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.
139 
140  <em>Next Nodes:</em>
141  - Static arcs to: error-drop, ethernet-input,
142  ip4-input-no-checksum, ip6-input, mpls-input
143  - per-interface redirection, controlled by
144  <code>xd->per_interface_next_index</code>
145 */
146 
148 dpdk_ol_flags_extract (struct rte_mbuf **mb, u16 * flags, int count)
149 {
150  u16 rv = 0;
151  int i;
152  for (i = 0; i < count; i++)
153  {
154  /* all flags we are interested in are in lower 8 bits but
155  that might change */
156  flags[i] = (u16) mb[i]->ol_flags;
157  rv |= flags[i];
158  }
159  return rv;
160 }
161 
164  uword n_rx_packets, int maybe_multiseg,
165  u16 * or_flagsp)
166 {
167  u32 n_left = n_rx_packets;
168  vlib_buffer_t *b[4];
169  struct rte_mbuf **mb = ptd->mbufs;
170  uword n_bytes = 0;
171  u16 *flags, or_flags = 0;
172  vlib_buffer_t bt;
173 
174  mb = ptd->mbufs;
175  flags = ptd->flags;
176 
177  /* copy template into local variable - will save per packet load */
179  while (n_left >= 8)
180  {
181  dpdk_prefetch_buffer_x4 (mb + 4);
182 
183  b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
184  b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
185  b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
186  b[3] = vlib_buffer_from_rte_mbuf (mb[3]);
187 
188  vlib_buffer_copy_template (b[0], &bt);
189  vlib_buffer_copy_template (b[1], &bt);
190  vlib_buffer_copy_template (b[2], &bt);
191  vlib_buffer_copy_template (b[3], &bt);
192 
193  dpdk_prefetch_mbuf_x4 (mb + 4);
194 
195  or_flags |= dpdk_ol_flags_extract (mb, flags, 4);
196  flags += 4;
197 
198  b[0]->current_data = mb[0]->data_off - RTE_PKTMBUF_HEADROOM;
199  n_bytes += b[0]->current_length = mb[0]->data_len;
200 
201  b[1]->current_data = mb[1]->data_off - RTE_PKTMBUF_HEADROOM;
202  n_bytes += b[1]->current_length = mb[1]->data_len;
203 
204  b[2]->current_data = mb[2]->data_off - RTE_PKTMBUF_HEADROOM;
205  n_bytes += b[2]->current_length = mb[2]->data_len;
206 
207  b[3]->current_data = mb[3]->data_off - RTE_PKTMBUF_HEADROOM;
208  n_bytes += b[3]->current_length = mb[3]->data_len;
209 
210  if (maybe_multiseg)
211  {
212  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], &bt);
213  n_bytes += dpdk_process_subseq_segs (vm, b[1], mb[1], &bt);
214  n_bytes += dpdk_process_subseq_segs (vm, b[2], mb[2], &bt);
215  n_bytes += dpdk_process_subseq_segs (vm, b[3], mb[3], &bt);
216  }
217 
222 
223  /* next */
224  mb += 4;
225  n_left -= 4;
226  }
227 
228  while (n_left)
229  {
230  b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
231  vlib_buffer_copy_template (b[0], &bt);
232  or_flags |= dpdk_ol_flags_extract (mb, flags, 1);
233  flags += 1;
234 
235  b[0]->current_data = mb[0]->data_off - RTE_PKTMBUF_HEADROOM;
236  n_bytes += b[0]->current_length = mb[0]->data_len;
237 
238  if (maybe_multiseg)
239  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], &bt);
241 
242  /* next */
243  mb += 1;
244  n_left -= 1;
245  }
246 
247  *or_flagsp = or_flags;
248  return n_bytes;
249 }
250 
253  uword n_rx_packets)
254 {
255  uword n;
257  vlib_buffer_t *b0;
258 
259  /* TODO prefetch and quad-loop */
260  for (n = 0; n < n_rx_packets; n++)
261  {
262  if ((ptd->flags[n] & PKT_RX_FDIR_ID) == 0)
263  continue;
264 
266  ptd->mbufs[n]->hash.fdir.hi);
267 
268  if (fle->next_index != (u16) ~ 0)
269  ptd->next[n] = fle->next_index;
270 
271  if (fle->flow_id != ~0)
272  {
273  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
274  b0->flow_id = fle->flow_id;
275  }
276 
277  if (fle->buffer_advance != ~0)
278  {
279  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
281  }
282  }
283 }
284 
287  vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
288 {
289  uword n_rx_packets = 0, n_rx_bytes;
290  u32 n_left, n_trace;
291  u32 *buffers;
293  struct rte_mbuf **mb;
294  vlib_buffer_t *b0;
295  u16 *next;
296  u16 or_flags;
297  u32 n;
298  int single_next = 0;
299 
301  thread_index);
302  vlib_buffer_t *bt = &ptd->buffer_template;
303 
304  if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
305  return 0;
306 
307  /* get up to DPDK_RX_BURST_SZ buffers from PMD */
308  while (n_rx_packets < DPDK_RX_BURST_SZ)
309  {
310  n = rte_eth_rx_burst (xd->port_id, queue_id,
311  ptd->mbufs + n_rx_packets,
312  DPDK_RX_BURST_SZ - n_rx_packets);
313  n_rx_packets += n;
314 
315  if (n < 32)
316  break;
317  }
318 
319  if (n_rx_packets == 0)
320  return 0;
321 
322  /* Update buffer template */
323  vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->sw_if_index;
324  bt->error = node->errors[DPDK_ERROR_NONE];
325  /* as DPDK is allocating empty buffers from mempool provided before interface
326  start for each queue, it is safe to store this in the template */
327  bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];
328  bt->ref_count = 1;
329  vnet_buffer (bt)->feature_arc_index = 0;
330  bt->current_config_index = 0;
331 
332  /* receive burst of packets from DPDK PMD */
333  if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
334  next_index = xd->per_interface_next_index;
335 
336  /* as all packets belong to the same interface feature arc lookup
337  can be don once and result stored in the buffer template */
339  vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);
340 
341  if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
342  n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 1, &or_flags);
343  else
344  n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 0, &or_flags);
345 
346  if (PREDICT_FALSE (or_flags & PKT_RX_FDIR))
347  {
348  /* some packets will need to go to different next nodes */
349  for (n = 0; n < n_rx_packets; n++)
350  ptd->next[n] = next_index;
351 
352  /* flow offload - process if rx flow offload enabled and at least one
353  packet is marked */
354  if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
355  (or_flags & PKT_RX_FDIR)))
356  dpdk_process_flow_offload (xd, ptd, n_rx_packets);
357 
358  /* enqueue buffers to the next node */
359  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
360  ptd->buffers, n_rx_packets,
361  sizeof (struct rte_mbuf));
362 
363  vlib_buffer_enqueue_to_next (vm, node, ptd->buffers, ptd->next,
364  n_rx_packets);
365  }
366  else
367  {
368  u32 *to_next, n_left_to_next;
369 
370  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
371  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs, to_next,
372  n_rx_packets,
373  sizeof (struct rte_mbuf));
374 
376  {
377  vlib_next_frame_t *nf;
378  vlib_frame_t *f;
380  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
381  f = vlib_get_frame (vm, nf->frame);
383 
384  ef = vlib_frame_scalar_args (f);
385  ef->sw_if_index = xd->sw_if_index;
386  ef->hw_if_index = xd->hw_if_index;
387 
388  /* if PMD supports ip4 checksum check and there are no packets
389  marked as ip4 checksum bad we can notify ethernet input so it
390  can send pacets to ip4-input-no-checksum node */
391  if (xd->flags & DPDK_DEVICE_FLAG_RX_IP4_CKSUM &&
392  (or_flags & PKT_RX_IP_CKSUM_BAD) == 0)
395  }
396  n_left_to_next -= n_rx_packets;
397  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
398  single_next = 1;
399  }
400 
401  /* packet trace if enabled */
402  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
403  {
404  if (single_next)
405  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
406  ptd->buffers, n_rx_packets,
407  sizeof (struct rte_mbuf));
408 
409  n_left = n_rx_packets;
410  buffers = ptd->buffers;
411  mb = ptd->mbufs;
412  next = ptd->next;
413 
414  while (n_trace && n_left)
415  {
416  b0 = vlib_get_buffer (vm, buffers[0]);
417  if (single_next == 0)
418  next_index = next[0];
419  vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 0);
420 
421  dpdk_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof t0[0]);
422  t0->queue_index = queue_id;
423  t0->device_index = xd->device_index;
424  t0->buffer_index = vlib_get_buffer_index (vm, b0);
425 
426  clib_memcpy_fast (&t0->mb, mb[0], sizeof t0->mb);
427  clib_memcpy_fast (&t0->buffer, b0,
428  sizeof b0[0] - sizeof b0->pre_data);
430  sizeof t0->buffer.pre_data);
431  clib_memcpy_fast (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
432  sizeof t0->data);
433  n_trace--;
434  n_left--;
435  buffers++;
436  mb++;
437  next++;
438  }
439  vlib_set_trace_count (vm, node, n_trace);
440  }
441 
443  (vnet_get_main ()->interface_main.combined_sw_if_counters
444  + VNET_INTERFACE_COUNTER_RX, thread_index, xd->sw_if_index,
445  n_rx_packets, n_rx_bytes);
446 
447  vnet_device_increment_rx_packets (thread_index, n_rx_packets);
448 
449  return n_rx_packets;
450 }
451 
453  vlib_frame_t * f)
454 {
455  dpdk_main_t *dm = &dpdk_main;
456  dpdk_device_t *xd;
457  uword n_rx_packets = 0;
458  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
460  u32 thread_index = node->thread_index;
461 
462  /*
463  * Poll all devices on this cpu for input/interrupts.
464  */
465  /* *INDENT-OFF* */
467  {
468  xd = vec_elt_at_index(dm->devices, dq->dev_instance);
469  n_rx_packets += dpdk_device_input (vm, dm, xd, node, thread_index,
470  dq->queue_id);
471  }
472  /* *INDENT-ON* */
473  return n_rx_packets;
474 }
475 
476 /* *INDENT-OFF* */
478  .type = VLIB_NODE_TYPE_INPUT,
479  .name = "dpdk-input",
480  .sibling_of = "device-input",
482 
483  /* Will be enabled if/when hardware is detected. */
484  .state = VLIB_NODE_STATE_DISABLED,
485 
486  .format_buffer = format_ethernet_header_with_length,
487  .format_trace = format_dpdk_rx_trace,
488 
489  .n_errors = DPDK_N_ERROR,
490  .error_strings = dpdk_error_strings,
491 };
492 /* *INDENT-ON* */
493 
494 /*
495  * fd.io coding-style-patch-verification: ON
496  *
497  * Local Variables:
498  * eval: (c-set-style "gnu")
499  * End:
500  */
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
static void vnet_device_increment_rx_packets(u32 thread_index, u64 count)
Definition: devices.h:110
vnet_device_and_queue_t * devices_and_queues
Definition: devices.h:69
#define vlib_buffer_from_rte_mbuf(x)
Definition: buffer.h:20
u32 flags
Definition: vhost_user.h:141
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:187
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
dpdk_main_t dpdk_main
Definition: init.c:45
vnet_main_t * vnet_get_main(void)
Definition: misc.c:46
#define PREDICT_TRUE(x)
Definition: clib.h:113
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:110
u32 sw_if_index
Definition: dpdk.h:201
#define foreach_dpdk_error
Definition: dpdk.h:460
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
Definition: node.h:306
u16 flags
Definition: dpdk.h:209
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, vlib_frame_t *f)
Definition: node_funcs.h:216
int i
u32 per_interface_next_index
Definition: dpdk.h:204
#define VLIB_NODE_FN(node)
Definition: node.h:202
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:470
unsigned char u8
Definition: types.h:56
u8 buffer_pool_index
index of buffer pool this buffer belongs.
Definition: buffer.h:133
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:130
#define static_always_inline
Definition: clib.h:100
dpdk_portid_t port_id
Definition: dpdk.h:198
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
Definition: ethernet.h:52
static_always_inline int vnet_device_input_have_features(u32 sw_if_index)
Definition: feature.h:308
static_always_inline void dpdk_prefetch_buffer_x4(struct rte_mbuf *mb[])
Definition: node.c:95
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
Definition: node_funcs.h:343
unsigned int u32
Definition: types.h:88
i16 buffer_advance
Definition: dpdk.h:186
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
Definition: node_funcs.h:264
static_always_inline void dpdk_prefetch_mbuf_x4(struct rte_mbuf *mb[])
Definition: node.c:86
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
Definition: buffer_funcs.h:257
#define DPDK_RX_BURST_SZ
Definition: dpdk.h:379
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:514
static_always_inline uword dpdk_process_subseq_segs(vlib_main_t *vm, vlib_buffer_t *b, struct rte_mbuf *mb, vlib_buffer_t *bt)
Definition: node.c:43
static_always_inline void dpdk_process_flow_offload(dpdk_device_t *xd, dpdk_per_thread_data_t *ptd, uword n_rx_packets)
Definition: node.c:252
dpdk_per_thread_data_t * per_thread_data
Definition: dpdk.h:398
static_always_inline void vlib_buffer_copy_template(vlib_buffer_t *b, vlib_buffer_t *bt)
Definition: buffer_funcs.h:145
unsigned short u16
Definition: types.h:57
#define ETH_INPUT_FRAME_F_IP4_CKSUM_OK
Definition: ethernet.h:55
vlib_buffer_t buffer_template
Definition: dpdk.h:389
#define PREDICT_FALSE(x)
Definition: clib.h:112
static_always_inline void vlib_get_buffer_indices_with_offset(vlib_main_t *vm, void **b, u32 *bi, uword count, i32 offset)
Translate array of buffer pointers into buffer indices with offset.
Definition: buffer_funcs.h:276
static char * dpdk_error_strings[]
Definition: node.c:32
u32 hw_if_index
Definition: dpdk.h:200
static_always_inline uword dpdk_process_rx_burst(vlib_main_t *vm, dpdk_per_thread_data_t *ptd, uword n_rx_packets, int maybe_multiseg, u16 *or_flagsp)
Definition: node.c:163
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
Definition: format.c:97
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
u16 flags[DPDK_RX_BURST_SZ]
Definition: dpdk.h:388
dpdk_device_t * devices
Definition: dpdk.h:396
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
vlib_main_t * vm
Definition: buffer.c:323
dpdk_flow_lookup_entry_t * flow_lookup_entries
Definition: dpdk.h:232
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
Definition: buffer_node.h:332
u32 flow_id
Generic flow identifier.
Definition: buffer.h:127
u8 data[]
Packet data.
Definition: buffer.h:181
vlib_node_registration_t dpdk_input_node
(constructor) VLIB_REGISTER_NODE (dpdk_input_node)
Definition: node.c:477
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
Definition: node_funcs.h:258
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
Definition: buffer.h:147
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:456
u32 buffers[DPDK_RX_BURST_SZ]
Definition: dpdk.h:385
u16 device_index
Definition: dpdk.h:445
#define ASSERT(truth)
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:178
u16 next[DPDK_RX_BURST_SZ]
Definition: dpdk.h:386
vlib_frame_t * frame
Definition: node.h:406
static_always_inline u32 dpdk_device_input(vlib_main_t *vm, dpdk_main_t *dm, dpdk_device_t *xd, vlib_node_runtime_t *node, u32 thread_index, u16 queue_id)
Definition: node.c:286
u16 flags
Definition: node.h:388
vlib_buffer_t buffer
Definition: dpdk.h:448
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
STATIC_ASSERT(STRUCT_OFFSET_OF(vnet_buffer_opaque_t, l2_hdr_offset)==STRUCT_OFFSET_OF(vnet_buffer_opaque_t, l3_hdr_offset) - 2, "l3_hdr_offset must follow l2_hdr_offset")
size_t count
Definition: vapi.c:47
dpdk_portid_t device_index
Definition: dpdk.h:195
u32 buffer_index
Definition: dpdk.h:444
format_function_t format_dpdk_rx_trace
Definition: dpdk.h:492
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
Definition: dpdk.h:182
struct rte_mbuf * mbufs[DPDK_RX_BURST_SZ]
Definition: dpdk.h:384
u8 data[256]
Definition: dpdk.h:449
#define foreach_device_and_queue(var, vec)
Definition: devices.h:161
static_always_inline u16 dpdk_ol_flags_extract(struct rte_mbuf **mb, u16 *flags, int count)
Main DPDK input node.
Definition: node.c:148
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
Definition: buffer.h:489
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
u16 next_index
Definition: dpdk.h:185
#define vnet_buffer(b)
Definition: buffer.h:365
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:315
static void vlib_frame_no_append(vlib_frame_t *f)
Definition: node_funcs.h:224
struct rte_mbuf mb
Definition: dpdk.h:447
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:203
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:167
volatile u8 ref_count
Reference count for this buffer.
Definition: buffer.h:130
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
u8 * buffer_pool_for_queue
Definition: dpdk.h:225
u32 flow_id
Definition: dpdk.h:184
Definition: defs.h:46
CLIB vectors are ubiquitous dynamically resized arrays with by user defined "headers".
u16 queue_index
Definition: dpdk.h:446