FD.io VPP v18.07.1-19-g511ce25
Vector Packet Processing
node.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/xxhash.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/device/dpdk.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/mpls/packet.h>
#include <vnet/handoff.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <dpdk/device/dpdk_priv.h>

#ifndef CLIB_MARCH_VARIANT
static char *dpdk_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_error
#undef _
};
#endif

STATIC_ASSERT (VNET_DEVICE_INPUT_NEXT_IP4_INPUT - 1 ==
	       VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT,
	       "IP4_INPUT must follow IP4_NCS_INPUT");

enum
{
  DPDK_RX_F_CKSUM_GOOD = 7,
  DPDK_RX_F_CKSUM_BAD = 4,
  DPDK_RX_F_FDIR = 2,
};

/* currently we are just copying bit positions from DPDK, but that
   might change in the future, in case we start to be interested in
   something stored in the upper bytes. Currently we store only the low
   byte for performance reasons */
STATIC_ASSERT (1 << DPDK_RX_F_CKSUM_GOOD == PKT_RX_IP_CKSUM_GOOD, "");
STATIC_ASSERT (1 << DPDK_RX_F_CKSUM_BAD == PKT_RX_IP_CKSUM_BAD, "");
STATIC_ASSERT (1 << DPDK_RX_F_FDIR == PKT_RX_FDIR, "");
STATIC_ASSERT ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | PKT_RX_FDIR) <
	       256, "dpdk flags not in low byte, fix needed");

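/* Consequence of the asserts above: the narrowing cast
   "(u8) mb->ol_flags" in dpdk_ol_flags_extract() below preserves
   exactly the checksum and FDIR bits, so one byte of flags per packet
   is enough. */
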
static_always_inline u32
dpdk_rx_next (vlib_node_runtime_t * node, u16 etype, u8 flags)
{
  if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
    {
      /* keep it branchless */
      u32 is_good = (flags >> DPDK_RX_F_CKSUM_GOOD) & 1;
      return VNET_DEVICE_INPUT_NEXT_IP4_INPUT - is_good;
    }
  else if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
  else if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS)))
    return VNET_DEVICE_INPUT_NEXT_MPLS_INPUT;
  else
    return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
}

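/* Worked example of the branchless dispatch above: if the NIC already
   validated the IP4 checksum (PKT_RX_IP_CKSUM_GOOD set), is_good == 1
   and the next node becomes VNET_DEVICE_INPUT_NEXT_IP4_INPUT - 1, which
   the STATIC_ASSERT near the top of this file pins to
   VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT (ip4-input-no-checksum), so the
   software checksum re-validation is skipped. */
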
static_always_inline uword
dpdk_process_subseq_segs (vlib_main_t * vm, vlib_buffer_t * b,
			  struct rte_mbuf * mb, vlib_buffer_free_list_t * fl)
{
  u8 nb_seg = 1;
  struct rte_mbuf *mb_seg = 0;
  vlib_buffer_t *b_seg, *b_chain = 0;
  mb_seg = mb->next;
  b_chain = b;

  if (mb->nb_segs < 2)
    return 0;

  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  b->total_length_not_including_first_buffer = 0;

  while (nb_seg < mb->nb_segs)
    {
      ASSERT (mb_seg != 0);

      b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
      vlib_buffer_init_for_free_list (b_seg, fl);

      ASSERT ((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
      ASSERT (b_seg->current_data == 0);

      /*
       * The driver (e.g. virtio) may not put the packet data at the start
       * of the segment, so don't assume b_seg->current_data == 0 is correct.
       */
      b_seg->current_data =
	(mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;

      b_seg->current_length = mb_seg->data_len;
      b->total_length_not_including_first_buffer += mb_seg->data_len;

      b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

      b_chain = b_seg;
      mb_seg = mb_seg->next;
      nb_seg++;
    }
  return b->total_length_not_including_first_buffer;
}

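/* Note: the value returned above counts bytes in the trailing segments
   only; callers add it to n_bytes on top of the first segment's
   data_len. */
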
static_always_inline void
dpdk_prefetch_mbuf_x4 (struct rte_mbuf *mb[])
{
  CLIB_PREFETCH (mb[0], CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (mb[1], CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, LOAD);
}

static_always_inline void
dpdk_prefetch_buffer_x4 (struct rte_mbuf *mb[])
{
  vlib_buffer_t *b;
  b = vlib_buffer_from_rte_mbuf (mb[0]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[1]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[2]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[3]);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}

static_always_inline void
dpdk_prefetch_buffer_data_x4 (struct rte_mbuf *mb[])
{
  vlib_buffer_t *b;
  b = vlib_buffer_from_rte_mbuf (mb[0]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[1]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[2]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
  b = vlib_buffer_from_rte_mbuf (mb[3]);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
}

/** \brief Main DPDK input node
    @node dpdk-input

    This is the main DPDK input node: across each assigned interface,
    call rte_eth_rx_burst(...) or similar to obtain a vector of
    packets to process. Derive @c vlib_buffer_t metadata from
    <code>struct rte_mbuf</code> metadata.
    Depending on the resulting metadata: adjust <code>b->current_data,
    b->current_length</code> and dispatch directly to
    ip4-input-no-checksum, or ip6-input. Trace the packet if required.

    @param vm   vlib_main_t corresponding to the current thread
    @param node vlib_node_runtime_t
    @param f    vlib_frame_t input-node, not used.

    @par Graph mechanics: buffer metadata, next index usage

    @em Uses:
    - <code>struct rte_mbuf mb->ol_flags</code>
        - PKT_RX_IP_CKSUM_BAD

    @em Sets:
    - <code>b->error</code> if the packet is to be dropped immediately
    - <code>b->current_data, b->current_length</code>
        - adjusted as needed to skip the L2 header in direct-dispatch cases
    - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>
        - rx interface sw_if_index
    - <code>vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0</code>
        - required by ipX-lookup
    - <code>b->flags</code>
        - to indicate multi-segment pkts (VLIB_BUFFER_NEXT_PRESENT), etc.

    <em>Next Nodes:</em>
    - Static arcs to: error-drop, ethernet-input,
      ip4-input-no-checksum, ip6-input, mpls-input
    - per-interface redirection, controlled by
      <code>xd->per_interface_next_index</code>
*/

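/* Road map of the fast path below: rte_eth_rx_burst() fills
   ptd->mbufs; dpdk_process_rx_burst() copies the buffer template and
   per-packet lengths; next nodes come either from the per-interface /
   feature-arc override or from dpdk_set_next_from_etype(); optional
   flow-offload and checksum fixups adjust ptd->next[]; finally
   vlib_buffer_enqueue_to_next() hands the vector to the graph. */
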
static_always_inline u8
dpdk_ol_flags_extract (struct rte_mbuf **mb, u8 * flags, int count)
{
  u8 rv = 0;
  int i;
  for (i = 0; i < count; i++)
    {
      /* all flags we are interested in are in the lower 8 bits but
         that might change */
      flags[i] = (u8) mb[i]->ol_flags;
      rv |= flags[i];
    }
  return rv;
}

static_always_inline uword
dpdk_process_rx_burst (vlib_main_t * vm, dpdk_per_thread_data_t * ptd,
		       uword n_rx_packets, int maybe_multiseg, u8 * or_flagsp)
{
  u32 n_left = n_rx_packets;
  vlib_buffer_t *b[4];
  vlib_buffer_free_list_t *fl;
  struct rte_mbuf **mb = ptd->mbufs;
  uword n_bytes = 0;
  i16 off;
  u8 *flags, or_flags = 0;
  u16 *next;

  fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  mb = ptd->mbufs;
  flags = ptd->flags;
  next = ptd->next;

  while (n_left >= 8)
    {
      CLIB_PREFETCH (mb + 8, CLIB_CACHE_LINE_BYTES, LOAD);

      dpdk_prefetch_buffer_x4 (mb + 4);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);

      clib_memcpy64_x4 (b[0], b[1], b[2], b[3], &ptd->buffer_template);

      dpdk_prefetch_mbuf_x4 (mb + 4);

      or_flags |= dpdk_ol_flags_extract (mb, flags, 4);
      flags += 4;

      /* we temporarily store the relative offset of the ethertype in
         next[x] so we can prefetch it and read it faster later */

      off = mb[0]->data_off;
      next[0] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[0])->l2_hdr_offset = off;
      b[0]->current_data = off;

      off = mb[1]->data_off;
      next[1] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[1])->l2_hdr_offset = off;
      b[1]->current_data = off;

      off = mb[2]->data_off;
      next[2] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[2])->l2_hdr_offset = off;
      b[2]->current_data = off;

      off = mb[3]->data_off;
      next[3] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[3])->l2_hdr_offset = off;
      b[3]->current_data = off;

      b[0]->current_length = mb[0]->data_len;
      b[1]->current_length = mb[1]->data_len;
      b[2]->current_length = mb[2]->data_len;
      b[3]->current_length = mb[3]->data_len;

      n_bytes += mb[0]->data_len;
      n_bytes += mb[1]->data_len;
      n_bytes += mb[2]->data_len;
      n_bytes += mb[3]->data_len;

      if (maybe_multiseg)
	{
	  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
	  n_bytes += dpdk_process_subseq_segs (vm, b[1], mb[1], fl);
	  n_bytes += dpdk_process_subseq_segs (vm, b[2], mb[2], fl);
	  n_bytes += dpdk_process_subseq_segs (vm, b[3], mb[3], fl);
	}

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      mb += 4;
      n_left -= 4;
      next += 4;
    }

  while (n_left)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      clib_memcpy (b[0], &ptd->buffer_template, 64);
      or_flags |= dpdk_ol_flags_extract (mb, flags, 1);
      flags += 1;

      off = mb[0]->data_off;
      next[0] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
      off -= RTE_PKTMBUF_HEADROOM;
      vnet_buffer (b[0])->l2_hdr_offset = off;
      b[0]->current_data = off;
      b[0]->current_length = mb[0]->data_len;
      n_bytes += mb[0]->data_len;
      if (maybe_multiseg)
	n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      mb += 1;
      n_left -= 1;
      next += 1;
    }

  *or_flagsp = or_flags;
  return n_bytes;
}

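/* Note: *or_flagsp is the bitwise OR of every packet's low-byte
   ol_flags, so the caller can ask "did any packet in this burst set
   flag X?" once per burst instead of once per packet; see the
   DPDK_RX_F_FDIR and DPDK_RX_F_CKSUM_BAD tests in dpdk_device_input()
   below. */
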
static_always_inline void
dpdk_set_next_from_etype (vlib_main_t * vm, vlib_node_runtime_t * node,
			  dpdk_per_thread_data_t * ptd, uword n_rx_packets)
{
  vlib_buffer_t *b[4];
  i16 adv[4];
  u16 etype[4];
  struct rte_mbuf **mb = ptd->mbufs;
  u8 *flags = ptd->flags;
  u16 *next = ptd->next;
  u32 n_left = n_rx_packets;

  while (n_left >= 12)
    {
      dpdk_prefetch_buffer_data_x4 (mb + 8);
      dpdk_prefetch_buffer_x4 (mb + 8);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);
      etype[0] = *(u16 *) ((u8 *) mb[0] + next[0] + sizeof (vlib_buffer_t));
      etype[1] = *(u16 *) ((u8 *) mb[1] + next[1] + sizeof (vlib_buffer_t));
      etype[2] = *(u16 *) ((u8 *) mb[2] + next[2] + sizeof (vlib_buffer_t));
      etype[3] = *(u16 *) ((u8 *) mb[3] + next[3] + sizeof (vlib_buffer_t));
      next[0] = dpdk_rx_next (node, etype[0], flags[0]);
      next[1] = dpdk_rx_next (node, etype[1], flags[1]);
      next[2] = dpdk_rx_next (node, etype[2], flags[2]);
      next[3] = dpdk_rx_next (node, etype[3], flags[3]);
      adv[0] = device_input_next_node_advance[next[0]];
      adv[1] = device_input_next_node_advance[next[1]];
      adv[2] = device_input_next_node_advance[next[2]];
      adv[3] = device_input_next_node_advance[next[3]];
      b[0]->current_data += adv[0];
      b[1]->current_data += adv[1];
      b[2]->current_data += adv[2];
      b[3]->current_data += adv[3];
      b[0]->current_length -= adv[0];
      b[1]->current_length -= adv[1];
      b[2]->current_length -= adv[2];
      b[3]->current_length -= adv[3];

      /* next */
      next += 4;
      mb += 4;
      n_left -= 4;
      flags += 4;
    }

  while (n_left)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      next[0] = *(u16 *) ((u8 *) mb[0] + next[0] + sizeof (vlib_buffer_t));
      next[0] = dpdk_rx_next (node, next[0], flags[0]);
      adv[0] = device_input_next_node_advance[next[0]];
      b[0]->current_data += adv[0];
      b[0]->current_length -= adv[0];

      /* next */
      next += 1;
      mb += 1;
      n_left -= 1;
      flags += 1;
    }
}

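/* Note on the etype loads above: dpdk_process_rx_burst() stashed
   "data_off + STRUCT_OFFSET_OF (ethernet_header_t, type)" in next[x],
   so the ethertype is reached with plain pointer arithmetic off the
   mbuf; the sizeof (vlib_buffer_t) term accounts for the buffer
   metadata sitting between the rte_mbuf header and the packet data. */
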
static_always_inline void
dpdk_process_flow_offload (dpdk_device_t * xd, dpdk_per_thread_data_t * ptd,
			   uword n_rx_packets)
{
  uword n;
  dpdk_flow_lookup_entry_t *fle;
  vlib_buffer_t *b0;

  /* TODO prefetch and quad-loop */
  for (n = 0; n < n_rx_packets; n++)
    {
      if ((ptd->flags[n] & (1 << DPDK_RX_F_FDIR)) == 0)
	continue;

      fle = pool_elt_at_index (xd->flow_lookup_entries,
			       ptd->mbufs[n]->hash.fdir.hi);

      if (fle->next_index != (u16) ~ 0)
	ptd->next[n] = fle->next_index;

      if (fle->flow_id != ~0)
	{
	  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
	  b0->flow_id = fle->flow_id;
	}

      if (fle->buffer_advance != ~0)
	{
	  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
	  vlib_buffer_advance (b0, fle->buffer_advance);
	}
    }
}

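/* Sketch of the flow-offload path above: a matching hardware flow rule
   marks the packet (PKT_RX_FDIR) and the associated mark
   (hash.fdir.hi) is used as an index into xd->flow_lookup_entries; the
   entry may override the next node, attach a flow_id to the buffer,
   and/or advance current_data past an encapsulation header. */
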
static_always_inline u32
dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
		   vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
{
  uword n_rx_packets = 0, n_rx_bytes;
  u32 n_left, n_trace;
  u32 *buffers;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  struct rte_mbuf **mb;
  vlib_buffer_t *b0;
  int known_next = 0;
  u16 *next;
  u8 or_flags;
  u32 n;

  dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
						  thread_index);
  vlib_buffer_t *bt = &ptd->buffer_template;

  if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
    return 0;

  /* get up to DPDK_RX_BURST_SZ buffers from PMD */
  while (n_rx_packets < DPDK_RX_BURST_SZ)
    {
      n = rte_eth_rx_burst (xd->port_id, queue_id,
			    ptd->mbufs + n_rx_packets,
			    DPDK_RX_BURST_SZ - n_rx_packets);
      n_rx_packets += n;

      if (n < 32)
	break;
    }

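/* A burst that returns fewer than 32 mbufs is taken to mean the rx
   queue is (nearly) empty, so we stop asking the PMD for more. */
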
  if (n_rx_packets == 0)
    return 0;

  /* Update buffer template */
  vnet_buffer (bt)->sw_if_index[VLIB_RX] = xd->sw_if_index;
  bt->error = node->errors[DPDK_ERROR_NONE];
  /* as DPDK allocates empty buffers from the mempool provided for each
     queue before interface start, it is safe to store this in the
     template */
  bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];

  /* redirect to a per-interface next node, if configured */
  if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
    {
      known_next = 1;
      next_index = xd->per_interface_next_index;
    }

  /* as all packets belong to the same interface, the feature arc lookup
     can be done once and the result stored in the buffer template */
  if (PREDICT_FALSE (vnet_device_input_have_features (xd->sw_if_index)))
    {
      vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);
      known_next = 1;
    }

  if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
    n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 1, &or_flags);
  else
    n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 0, &or_flags);

  if (PREDICT_FALSE (known_next))
    {
      for (n = 0; n < n_rx_packets; n++)
	ptd->next[n] = next_index;

      vnet_buffer (bt)->feature_arc_index = 0;
      bt->current_config_index = 0;
    }
  else
    dpdk_set_next_from_etype (vm, node, ptd, n_rx_packets);

  /* flow offload - process if rx flow offload is enabled and at least
     one packet is marked */
  if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
		     (or_flags & (1 << DPDK_RX_F_FDIR))))
    dpdk_process_flow_offload (xd, ptd, n_rx_packets);

  /* is at least one packet marked as ip4 checksum bad? */
  if (PREDICT_FALSE (or_flags & (1 << DPDK_RX_F_CKSUM_BAD)))
    for (n = 0; n < n_rx_packets; n++)
      {
	if ((ptd->flags[n] & (1 << DPDK_RX_F_CKSUM_BAD)) == 0)
	  continue;
	if (ptd->next[n] != VNET_DEVICE_INPUT_NEXT_IP4_INPUT)
	  continue;

	b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
	b0->error = node->errors[DPDK_ERROR_IP_CHECKSUM_ERROR];
	ptd->next[n] = VNET_DEVICE_INPUT_NEXT_DROP;
      }

  /* enqueue buffers to the next node */
  vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs, ptd->buffers,
				       n_rx_packets,
				       sizeof (struct rte_mbuf));

  vlib_buffer_enqueue_to_next (vm, node, ptd->buffers, ptd->next,
			       n_rx_packets);

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      n_left = n_rx_packets;
      buffers = ptd->buffers;
      mb = ptd->mbufs;
      next = ptd->next;
      while (n_trace && n_left)
	{
	  b0 = vlib_get_buffer (vm, buffers[0]);
	  vlib_trace_buffer (vm, node, next[0], b0, /* follow_chain */ 0);

	  dpdk_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof t0[0]);
	  t0->queue_index = queue_id;
	  t0->device_index = xd->device_index;
	  t0->buffer_index = vlib_get_buffer_index (vm, b0);

	  clib_memcpy (&t0->mb, mb[0], sizeof t0->mb);
	  clib_memcpy (&t0->buffer, b0, sizeof b0[0] - sizeof b0->pre_data);
	  clib_memcpy (t0->buffer.pre_data, b0->data,
		       sizeof t0->buffer.pre_data);
	  clib_memcpy (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
		       sizeof t0->data);
	  n_trace--;
	  n_left--;
	  buffers++;
	  mb++;
	  next++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }

  /* rx pcap capture if enabled */
  if (PREDICT_FALSE (dm->pcap[VLIB_RX].pcap_enable))
    {
      u32 bi0;
      n_left = n_rx_packets;
      buffers = ptd->buffers;
      while (n_left)
	{
	  bi0 = buffers[0];
	  b0 = vlib_get_buffer (vm, bi0);
	  buffers++;

	  if (dm->pcap[VLIB_RX].pcap_sw_if_index == 0 ||
	      dm->pcap[VLIB_RX].pcap_sw_if_index
	      == vnet_buffer (b0)->sw_if_index[VLIB_RX])
	    {
	      struct rte_mbuf *mb;
	      i16 data_start;
	      i32 temp_advance;

	      /*
	       * Note: current_data will have advanced
	       * when we skip ethernet input.
	       * Temporarily back up to the original DMA
	       * target, so we capture a valid ethernet frame
	       */
	      mb = rte_mbuf_from_vlib_buffer (b0);

	      /* Figure out the original data_start */
	      data_start = (mb->buf_addr + mb->data_off) - (void *) b0->data;
	      /* Back up that far */
	      temp_advance = b0->current_data - data_start;
	      vlib_buffer_advance (b0, -temp_advance);
	      /* Capture the packet */
	      pcap_add_buffer (&dm->pcap[VLIB_RX].pcap_main, vm, bi0, 512);
	      /* and advance again */
	      vlib_buffer_advance (b0, temp_advance);
	    }
	  n_left--;
	}
    }

  vlib_increment_combined_counter
    (vnet_get_main ()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, thread_index, xd->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (thread_index, n_rx_packets);

  return n_rx_packets;
}

VLIB_NODE_FN (dpdk_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd;
  uword n_rx_packets = 0;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;
  u32 thread_index = node->thread_index;

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  /* *INDENT-OFF* */
  foreach_device_and_queue (dq, rt->devices_and_queues)
    {
      xd = vec_elt_at_index(dm->devices, dq->dev_instance);
      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE))
	continue;	/* Do not poll slave to a bonded interface */
      n_rx_packets += dpdk_device_input (vm, dm, xd, node, thread_index,
					 dq->queue_id);
    }
  /* *INDENT-ON* */
  return n_rx_packets;
}

#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "dpdk-input",
  .sibling_of = "device-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_dpdk_rx_trace,

  .n_errors = DPDK_N_ERROR,
  .error_strings = dpdk_error_strings,
};
/* *INDENT-ON* */
#endif

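/* Note: "sibling_of device-input" makes dpdk-input share device-input's
   next-node arcs, which is what keeps the VNET_DEVICE_INPUT_NEXT_*
   indices used throughout this file valid for this node. */
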
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */