FD.io VPP  v18.01.2-1-g9b554f3
Vector Packet Processing
device.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <memif/memif.h>
#include <memif/private.h>

#define foreach_memif_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
_(PENDING_MSGS, "pending msgs in tx ring")

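/* X-macro error list: expanded once below to build the error enum and once
   more to build the matching counter strings, so both stay in sync. */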
typedef enum
{
#define _(f,s) MEMIF_TX_ERROR_##f,
  foreach_memif_tx_func_error
#undef _
    MEMIF_TX_N_ERROR,
} memif_tx_func_error_t;

static __clib_unused char *memif_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_memif_tx_func_error
#undef _
};

#ifndef CLIB_MULTIARCH_VARIANT
u8 *
format_memif_device_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  memif_main_t *mm = &memif_main;
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, dev_instance);

  s = format (s, "memif%lu/%lu", mif->socket_file_index, mif->id);
  return s;
}
#endif

static __clib_unused u8 *
format_memif_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  u32 indent = format_get_indent (s);

  s = format (s, "MEMIF interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                  dev_instance);
    }
  return s;
}

static __clib_unused u8 *
format_memif_tx_trace (u8 * s, va_list * args)
{
  s = format (s, "Unimplemented...");
  return s;
}

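/* Prefetch the vlib buffer header and the first cache line of packet data
   ahead of the copy loop so the copies do not stall on cold memory. */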
static_always_inline void
memif_prefetch_buffer_and_data (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_prefetch_buffer_header (b, LOAD);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
}

/**
 * @brief Copy buffer to tx ring
 *
 * @param * vm (in)
 * @param * node (in)
 * @param * mif (in) pointer to memif interface
 * @param bi (in) vlib buffer index
 * @param * ring (in) pointer to memif ring
 * @param * head (in/out) ring head
 * @param mask (in) ring size - 1
 */
static_always_inline void
memif_copy_buffer_to_tx_ring (vlib_main_t * vm, vlib_node_runtime_t * node,
                              memif_if_t * mif, u32 bi, memif_ring_t * ring,
                              u16 * head, u16 mask)
{
  vlib_buffer_t *b0;
  void *mb0;
  u32 total = 0, len;
  u16 slot = (*head) & mask;

  mb0 = memif_get_buffer (mif, ring, slot);
  ring->desc[slot].flags = 0;
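  /* Walk the (possibly chained) vlib buffer: bytes are packed into the
     current descriptor, and when a descriptor fills up the copy continues
     in the next slot with MEMIF_DESC_FLAG_NEXT set on the previous one.
     A packet that cannot fit is counted as a truncated-packet error. */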
  do
    {
      b0 = vlib_get_buffer (vm, bi);
      len = b0->current_length;
      if (PREDICT_FALSE (ring->desc[slot].buffer_length < (total + len)))
        {
          if (PREDICT_TRUE (total))
            {
              ring->desc[slot].length = total;
              total = 0;
              ring->desc[slot].flags |= MEMIF_DESC_FLAG_NEXT;
              (*head)++;
              slot = (*head) & mask;
              mb0 = memif_get_buffer (mif, ring, slot);
              ring->desc[slot].flags = 0;
            }
        }
      if (PREDICT_TRUE (ring->desc[slot].buffer_length >= (total + len)))
        {
          clib_memcpy (mb0 + total, vlib_buffer_get_current (b0),
                       CLIB_CACHE_LINE_BYTES);
          if (len > CLIB_CACHE_LINE_BYTES)
            clib_memcpy (mb0 + CLIB_CACHE_LINE_BYTES + total,
                         vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
                         len - CLIB_CACHE_LINE_BYTES);
          total += len;
        }
      else
        {
          vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_TRUNC_PACKET,
                            1);
          break;
        }
    }
  while ((bi = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) ? b0->next_buffer : 0));

  if (PREDICT_TRUE (total))
    {
      ring->desc[slot].length = total;
      (*head)++;
    }
}

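/* Shared TX path for both ring directions: pick a TX queue for the calling
   thread (locking the interface when queues are shared between threads),
   copy packets into free slots, publish the new head, and retry a few times
   before counting whatever is left as "no free tx slots". */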
static_always_inline uword
memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame, memif_if_t * mif,
                           memif_ring_type_t type)
{
  u8 qid;
  memif_ring_t *ring;
  u32 *buffers = vlib_frame_args (frame);
  u32 n_left = frame->n_vectors;
  u16 ring_size, mask;
  u16 head, tail;
  u16 free_slots;
  u32 thread_index = vlib_get_thread_index ();
  u8 tx_queues = vec_len (mif->tx_queues);
  memif_queue_t *mq;
  int n_retries = 5;

  if (tx_queues < vec_len (vlib_mains))
    {
      qid = thread_index % tx_queues;
      clib_spinlock_lock_if_init (&mif->lockp);
    }
  else
    qid = thread_index;

  mq = vec_elt_at_index (mif->tx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;
retry:

  /* free consumed buffers */

  head = ring->head;
  tail = ring->tail;

  free_slots = ring_size - head + tail;

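  /* free_slots counts descriptors the producer may still fill: ring size
     minus entries already published (head) plus entries the peer has
     consumed (tail); the u16 arithmetic wraps naturally. The first loop
     below copies two packets per iteration while prefetching a few slots
     ahead, the second one drains the remainder. */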
  while (n_left > 5 && free_slots > 1)
    {
      CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 2) & mask),
                     CLIB_CACHE_LINE_BYTES, STORE);
      CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 3) & mask),
                     CLIB_CACHE_LINE_BYTES, STORE);
      CLIB_PREFETCH (&ring->desc[(head + 4) & mask], CLIB_CACHE_LINE_BYTES,
                     STORE);
      CLIB_PREFETCH (&ring->desc[(head + 5) & mask], CLIB_CACHE_LINE_BYTES,
                     STORE);
      memif_prefetch_buffer_and_data (vm, buffers[2]);
      memif_prefetch_buffer_and_data (vm, buffers[3]);

      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[0], ring, &head,
                                    mask);
      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[1], ring, &head,
                                    mask);

      buffers += 2;
      n_left -= 2;
      free_slots -= 2;
    }

  while (n_left && free_slots)
    {
      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[0], ring, &head,
                                    mask);
      buffers++;
      n_left--;
      free_slots--;
    }

  CLIB_MEMORY_STORE_BARRIER ();
  ring->head = head;

  if (n_left && n_retries--)
    goto retry;

  clib_spinlock_unlock_if_init (&mif->lockp);

  if (n_left)
    {
      vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
                        n_left);
    }

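  /* If the peer has not masked interrupts for this ring, signal it through
     the queue's interrupt fd so it knows new descriptors are ready. */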
  if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
    {
      u64 b = 1;
      CLIB_UNUSED (int r) = write (mq->int_fd, &b, sizeof (b));
      mq->int_count++;
    }

  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);

  return frame->n_vectors;
}

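/* Device-class TX entry point (one copy per multiarch variant). A slave
   interface transmits on the slave-to-master ring, a master on the
   master-to-slave ring. */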
uword
CLIB_MULTIARCH_FN (memif_interface_tx) (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        vlib_frame_t * frame)
{
  memif_main_t *nm = &memif_main;
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  memif_if_t *mif = pool_elt_at_index (nm->interfaces, rund->dev_instance);

  if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
    return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_S2M);
  else
    return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_M2S);
}

static __clib_unused void
memif_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                               u32 node_index)
{
  memif_main_t *apm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (apm->interfaces, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      mif->per_interface_next_index = node_index;
      return;
    }

  mif->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), memif_input_node.index, node_index);
}

static __clib_unused void
memif_clear_hw_interface_counters (u32 instance)
{
  /* Nothing for now */
}

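/* RX mode change: polling mode sets MEMIF_RING_FLAG_MASK_INT on the queue's
   ring so the peer stops writing the interrupt fd; any other mode clears the
   flag again. */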
static __clib_unused clib_error_t *
memif_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                vnet_hw_interface_rx_mode mode)
{
  memif_main_t *mm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);

  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
  else
    mq->ring->flags &= ~MEMIF_RING_FLAG_MASK_INT;

  return 0;
}

static __clib_unused clib_error_t *
memif_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  memif_main_t *mm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  static clib_error_t *error = 0;

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
  else
    mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;

  return error;
}

static __clib_unused clib_error_t *
memif_subif_add_del_function (vnet_main_t * vnm,
                              u32 hw_if_index,
                              struct vnet_sw_interface_t *st, int is_add)
{
  /* Nothing for now */
  return 0;
}

#ifndef CLIB_MULTIARCH_VARIANT
/* *INDENT-OFF* */
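/* Device class registration: ties the TX function, format/trace helpers and
   the admin, RX-mode and redirect callbacks above into the vnet device
   framework. */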
VNET_DEVICE_CLASS (memif_device_class) = {
  .name = "memif",
  .tx_function = memif_interface_tx,
  .format_device_name = format_memif_device_name,
  .format_device = format_memif_device,
  .format_tx_trace = format_memif_tx_trace,
  .tx_function_n_errors = MEMIF_TX_N_ERROR,
  .tx_function_error_strings = memif_tx_func_error_strings,
  .rx_redirect_to_node = memif_set_interface_next_node,
  .clear_counters = memif_clear_hw_interface_counters,
  .admin_up_down_function = memif_interface_admin_up_down,
  .subif_add_del_function = memif_subif_add_del_function,
  .rx_mode_change_function = memif_interface_rx_mode_change,
};

#if __x86_64__
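/* Weak references to the AVX-512/AVX2 builds of the TX function; the
   constructor below switches the device class to the fastest variant the
   running CPU supports. */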
vlib_node_function_t __clib_weak memif_interface_tx_avx512;
vlib_node_function_t __clib_weak memif_interface_tx_avx2;
static void __clib_constructor
dpdk_interface_tx_multiarch_select (void)
{
  if (memif_interface_tx_avx512 && clib_cpu_supports_avx512f ())
    memif_device_class.tx_function = memif_interface_tx_avx512;
  else if (memif_interface_tx_avx2 && clib_cpu_supports_avx2 ())
    memif_device_class.tx_function = memif_interface_tx_avx2;
}
#endif
#endif

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */