FD.io VPP v21.06-3-gbb25fbf28
Vector Packet Processing
output.c
#include <poll.h>
#include <string.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <af_xdp/af_xdp.h>

#define AF_XDP_TX_RETRIES 5

/* drain the TX completion ring and free the corresponding vlib buffers */
static_always_inline void
af_xdp_device_output_free (vlib_main_t * vm, const vlib_node_runtime_t * node,
                           af_xdp_txq_t * txq)
{
  const __u64 *compl;
  const u32 size = txq->cq.size;
  const u32 mask = size - 1;
  u32 bis[VLIB_FRAME_SIZE], *bi = bis;
  u32 n_wrap, idx;
  u32 n = xsk_ring_cons__peek (&txq->cq, ARRAY_LEN (bis), &idx);
  const u32 n_free = n;

  /* we rely on casting addr (u64) -> bi (u32) to discard the XSK offset below */
  STATIC_ASSERT (BITS (bi[0]) + CLIB_LOG2_CACHE_LINE_BYTES <=
                 XSK_UNALIGNED_BUF_OFFSET_SHIFT, "wrong size");
  ASSERT (mask == txq->cq.mask);

  if (!n_free)
    return;

  compl = xsk_ring_cons__comp_addr (&txq->cq, idx);
  n = clib_min (n_free, size - (idx & mask));
  n_wrap = n_free - n;

wrap_around:

  while (n >= 8)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = (*(u64x4u *) (compl + 0)) >> CLIB_LOG2_CACHE_LINE_BYTES;
      u64x4 b1 = (*(u64x4u *) (compl + 4)) >> CLIB_LOG2_CACHE_LINE_BYTES;
      /* permute 256-bit register so lower u32s of each buffer index are
       * placed into lower 128-bits */
      const u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
      u32x8 b2 = u32x8_permute ((u32x8) b0, mask);
      u32x8 b3 = u32x8_permute ((u32x8) b1, mask);
      /* extract lower 128-bits and save them to the array of buffer indices */
      *(u32x4u *) (bi + 0) = u32x8_extract_lo (b2);
      *(u32x4u *) (bi + 4) = u32x8_extract_lo (b3);
#else
      bi[0] = compl[0] >> CLIB_LOG2_CACHE_LINE_BYTES;
      bi[1] = compl[1] >> CLIB_LOG2_CACHE_LINE_BYTES;
      bi[2] = compl[2] >> CLIB_LOG2_CACHE_LINE_BYTES;
      bi[3] = compl[3] >> CLIB_LOG2_CACHE_LINE_BYTES;
      bi[4] = compl[4] >> CLIB_LOG2_CACHE_LINE_BYTES;
      bi[5] = compl[5] >> CLIB_LOG2_CACHE_LINE_BYTES;
      bi[6] = compl[6] >> CLIB_LOG2_CACHE_LINE_BYTES;
      bi[7] = compl[7] >> CLIB_LOG2_CACHE_LINE_BYTES;
#endif
      compl += 8;
      bi += 8;
      n -= 8;
    }

  while (n >= 1)
    {
      bi[0] = compl[0] >> CLIB_LOG2_CACHE_LINE_BYTES;
      ASSERT (vlib_buffer_is_known (vm, bi[0]) ==
              VLIB_BUFFER_KNOWN_ALLOCATED);
      compl += 1;
      bi += 1;
      n -= 1;
    }

  if (n_wrap)
    {
      compl = xsk_ring_cons__comp_addr (&txq->cq, 0);
      n = n_wrap;
      n_wrap = 0;
      goto wrap_around;
    }

  xsk_ring_cons__release (&txq->cq, n_free);
  vlib_buffer_free (vm, bis, n_free);
}
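
The comment and STATIC_ASSERT above are the whole trick of this function: the UMEM is registered over the vlib buffer memory, so the low bits of each completion entry are the buffer's byte offset from buffer_mem_start, and shifting by CLIB_LOG2_CACHE_LINE_BYTES turns that into the vlib buffer index, while the per-packet offset the TX path packs into the top 16 bits is thrown away by the cast to u32. A stand-alone sketch of that round trip, using stand-in constants (6 for CLIB_LOG2_CACHE_LINE_BYTES, 48 for XSK_UNALIGNED_BUF_OFFSET_SHIFT) and a made-up buffer index, illustration only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LOG2_CACHE_LINE 6 /* stand-in for CLIB_LOG2_CACHE_LINE_BYTES */
#define XSK_OFF_SHIFT 48  /* stand-in for XSK_UNALIGNED_BUF_OFFSET_SHIFT */

int
main (void)
{
  uint32_t bi = 0x12345;                                 /* hypothetical vlib buffer index */
  uint64_t umem_off = (uint64_t) bi << LOG2_CACHE_LINE;  /* buffer's byte offset in the UMEM */
  uint64_t pkt_off = 128;                                /* headroom: sizeof (vlib_buffer_t) + current_data */
  uint64_t addr = (pkt_off << XSK_OFF_SHIFT) | umem_off; /* what the TX path writes into desc.addr */

  /* what output_free does: shift out the cache-line granularity, then let the
   * u32 cast drop the packed offset bits (valid because 32 + 6 <= 48) */
  uint32_t recovered = (uint32_t) (addr >> LOG2_CACHE_LINE);

  assert (recovered == bi);
  printf ("recovered buffer index: 0x%x\n", recovered);
  return 0;
}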

/* submit pending descriptors and kick the kernel if it requested a wakeup */
static_always_inline void
af_xdp_device_output_tx_db (vlib_main_t * vm,
                            const vlib_node_runtime_t * node,
                            af_xdp_device_t * ad,
                            af_xdp_txq_t * txq, const u32 n_tx)
{
  xsk_ring_prod__submit (&txq->tx, n_tx);

  if (!xsk_ring_prod__needs_wakeup (&txq->tx))
    return;

  vlib_error_count (vm, node->node_index, AF_XDP_TX_ERROR_SYSCALL_REQUIRED, 1);

  clib_spinlock_lock_if_init (&txq->syscall_lock);

  if (xsk_ring_prod__needs_wakeup (&txq->tx))
    {
      struct pollfd fd = { .fd = txq->xsk_fd, .events = POLLIN | POLLOUT };
      int ret = poll (&fd, 1, 0);
      if (PREDICT_FALSE (ret < 0))
        {
          /* something bad is happening */
          vlib_error_count (vm, node->node_index,
                            AF_XDP_TX_ERROR_SYSCALL_FAILURES, 1);
          af_xdp_device_error (ad, "tx poll() failed");
        }
    }

  clib_spinlock_unlock_if_init (&txq->syscall_lock);
}
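
The function above is VPP's take on the standard AF_XDP need-wakeup dance: for a socket bound with XDP_USE_NEED_WAKEUP, after submitting TX descriptors userspace has to issue a syscall whenever the kernel sets the wakeup flag on the ring, otherwise the descriptors may never be sent. A minimal sketch of that generic pattern, assuming the xsk.h helpers from libbpf/libxdp (the header path varies by version), is shown below; the VPP version additionally serializes the syscall with txq->syscall_lock and bumps error counters for observability.

#include <poll.h>
#include <sys/socket.h>
#include <xdp/xsk.h> /* or <bpf/xsk.h> with older libbpf */

/* illustration only: wake the kernel so it drains the TX ring */
static void
kick_tx (int xsk_fd, struct xsk_ring_prod *tx)
{
  if (!xsk_ring_prod__needs_wakeup (tx))
    return; /* kernel is already processing the ring */

  /* a zero-timeout poll() is one valid kick; sendto (xsk_fd, NULL, 0,
   * MSG_DONTWAIT, NULL, 0) is the other commonly used form */
  struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN | POLLOUT };
  if (poll (&pfd, 1, 0) < 0)
    {
      /* caller should count/log the failure, as the VPP code does */
    }
}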

/* enqueue up to n_tx buffers on the TX ring; returns the number actually
 * queued (0 if the ring is full) */
static_always_inline u32
af_xdp_device_output_tx_try (vlib_main_t * vm,
                             const vlib_node_runtime_t * node,
                             af_xdp_device_t * ad, af_xdp_txq_t * txq,
                             u32 n_tx, u32 * bi)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  const uword start = vm->buffer_main->buffer_mem_start;
  const u32 size = txq->tx.size;
  const u32 mask = size - 1;
  struct xdp_desc *desc;
  u64 offset, addr;
  u32 idx, n, n_wrap;

  ASSERT (mask == txq->cq.mask);

  n_tx = xsk_ring_prod__reserve (&txq->tx, n_tx, &idx);

  /* if ring is full, do nothing */
  if (PREDICT_FALSE (0 == n_tx))
    return 0;

  vlib_get_buffers (vm, bi, bufs, n_tx);

  desc = xsk_ring_prod__tx_desc (&txq->tx, idx);
  n = clib_min (n_tx, size - (idx & mask));
  n_wrap = n_tx - n;

wrap_around:

  /* 4 packets per iteration; requiring n >= 8 keeps the b[4..7] prefetches
   * within the current batch */
  while (n >= 8)
    {
      vlib_prefetch_buffer_header (b[4], LOAD);
      offset =
        (sizeof (vlib_buffer_t) +
         b[0]->current_data) << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
      addr = pointer_to_uword (b[0]) - start;
      desc[0].addr = offset | addr;
      desc[0].len = b[0]->current_length;

      vlib_prefetch_buffer_header (b[5], LOAD);
      offset =
        (sizeof (vlib_buffer_t) +
         b[1]->current_data) << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
      addr = pointer_to_uword (b[1]) - start;
      desc[1].addr = offset | addr;
      desc[1].len = b[1]->current_length;

      vlib_prefetch_buffer_header (b[6], LOAD);
      offset =
        (sizeof (vlib_buffer_t) +
         b[2]->current_data) << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
      addr = pointer_to_uword (b[2]) - start;
      desc[2].addr = offset | addr;
      desc[2].len = b[2]->current_length;

      vlib_prefetch_buffer_header (b[7], LOAD);
      offset =
        (sizeof (vlib_buffer_t) +
         b[3]->current_data) << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
      addr = pointer_to_uword (b[3]) - start;
      desc[3].addr = offset | addr;
      desc[3].len = b[3]->current_length;

      desc += 4;
      b += 4;
      n -= 4;
    }

  while (n >= 1)
    {
      offset =
        (sizeof (vlib_buffer_t) +
         b[0]->current_data) << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
      addr = pointer_to_uword (b[0]) - start;
      desc[0].addr = offset | addr;
      desc[0].len = b[0]->current_length;
      desc += 1;
      b += 1;
      n -= 1;
    }

  if (n_wrap)
    {
      desc = xsk_ring_prod__tx_desc (&txq->tx, 0);
      n = n_wrap;
      n_wrap = 0;
      goto wrap_around;
    }

  return n_tx;
}
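
Both the completion-ring drain and the enqueue loop above use the same wrap-around idiom: xsk_ring_cons__comp_addr / xsk_ring_prod__tx_desc return a pointer into the ring's flat backing array, so a batch that crosses the end of the ring must be split into a chunk that runs up to the last slot and a remainder that restarts at slot 0. The arithmetic is just the two lines computing n and n_wrap; the toy program below (stand-alone, illustration only) shows it with concrete numbers.

#include <stdint.h>
#include <stdio.h>

static uint32_t
min_u32 (uint32_t a, uint32_t b) /* stand-in for clib_min */
{
  return a < b ? a : b;
}

int
main (void)
{
  const uint32_t size = 1024, mask = size - 1; /* ring size must be a power of two */
  uint32_t idx = 1020;  /* hypothetical slot returned by xsk_ring_prod__reserve */
  uint32_t n_tx = 10;   /* descriptors to write in this call */

  uint32_t n = min_u32 (n_tx, size - (idx & mask)); /* room left before the ring end */
  uint32_t n_wrap = n_tx - n;                       /* remainder written from slot 0 */

  printf ("%u descriptors at slot %u, then %u descriptors at slot 0\n",
          n, idx & mask, n_wrap); /* -> 4 at 1020, then 6 at 0 */
  return 0;
}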

/* TX function: retry the enqueue a few times, ring the doorbell, then free
 * whatever could not be sent */
VNET_DEVICE_CLASS_TX_FN (af_xdp_device_class) (vlib_main_t * vm,
                                               vlib_node_runtime_t * node,
                                               vlib_frame_t * frame)
{
  af_xdp_main_t *rm = &af_xdp_main;
  vnet_interface_output_runtime_t *ord = (void *) node->runtime_data;
  af_xdp_device_t *ad = pool_elt_at_index (rm->devices, ord->dev_instance);
  u32 thread_index = vm->thread_index;
  af_xdp_txq_t *txq =
    vec_elt_at_index (ad->txqs, (thread_index - 1) % ad->txq_num);
  u32 *from;
  u32 n, n_tx;
  int i;

  from = vlib_frame_vector_args (frame);
  n_tx = frame->n_vectors;

  clib_spinlock_lock_if_init (&txq->lock);

  for (i = 0, n = 0; i < AF_XDP_TX_RETRIES && n < n_tx; i++)
    {
      u32 n_enq;
      af_xdp_device_output_free (vm, node, txq);
      n_enq = af_xdp_device_output_tx_try (vm, node, ad, txq, n_tx - n, from);
      n += n_enq;
      from += n_enq;
    }

  af_xdp_device_output_tx_db (vm, node, ad, txq, n);

  clib_spinlock_unlock_if_init (&txq->lock);

  if (PREDICT_FALSE (n != n_tx))
    {
      /* 'from' has already advanced past the buffers that were enqueued */
      vlib_buffer_free (vm, from, n_tx - n);
      vlib_error_count (vm, node->node_index,
                        AF_XDP_TX_ERROR_NO_FREE_SLOTS, n_tx - n);
    }

  return n;
}
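
The queue selection above maps worker thread thread_index (workers are numbered from 1, thread 0 being the main thread, hence the subtraction) onto TX queue (thread_index - 1) % txq_num, so with fewer queues than workers several workers share a queue; in that case the plugin is expected to have initialized txq->lock and clib_spinlock_lock_if_init takes it, while with one queue per worker the lock stays uninitialized and the call is a no-op. A tiny stand-alone illustration of the mapping:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  const uint32_t txq_num = 2;   /* hypothetical: device created with 2 TX queues */
  const uint32_t n_workers = 4; /* worker threads are numbered 1..n_workers */

  for (uint32_t thread_index = 1; thread_index <= n_workers; thread_index++)
    printf ("worker thread %u -> TX queue %u\n",
            thread_index, (thread_index - 1) % txq_num);
  /* prints 1->0, 2->1, 3->0, 4->1: queues are shared, so txq->lock is needed */
  return 0;
}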

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */