FD.io VPP v17.07.01-10-g3be13f0
Vector Packet Processing
device.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <memif/memif.h>
#include <memif/private.h>

#define foreach_memif_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
_(PENDING_MSGS, "pending msgs in tx ring")

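/* The error list above expands into both the error enum and the matching
 * string table handed to the VLIB error counter infrastructure below. */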
typedef enum
{
#define _(f,s) MEMIF_TX_ERROR_##f,
  foreach_memif_tx_func_error
#undef _
    MEMIF_TX_N_ERROR,
} memif_tx_func_error_t;

static char *memif_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_memif_tx_func_error
#undef _
};

u8 *
format_memif_device_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);

  s = format (s, "memif%u", i);
  return s;
}

static u8 *
format_memif_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  uword indent = format_get_indent (s);

  s = format (s, "MEMIF interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                  dev_instance);
    }
  return s;
}

static u8 *
format_memif_tx_trace (u8 * s, va_list * args)
{
  s = format (s, "Unimplemented...");
  return s;
}

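/* Prefetch the vlib buffer header and the first cache line of its packet
 * data so the copy loop below does not stall on them. */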
static_always_inline void
memif_prefetch_buffer_and_data (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vlib_prefetch_buffer_header (b, LOAD);
  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
}

/**
 * @brief Copy buffer to tx ring
 *
 * @param vm (in) vlib main
 * @param node (in) node runtime, used for error counters
 * @param mif (in) pointer to memif interface
 * @param bi (in) vlib buffer index
 * @param ring (in) pointer to memif ring
 * @param head (in/out) ring head
 * @param mask (in) ring size - 1
 */
static_always_inline void
memif_copy_buffer_to_tx_ring (vlib_main_t * vm, vlib_node_runtime_t * node,
                              memif_if_t * mif, u32 bi, memif_ring_t * ring,
                              u16 * head, u16 mask)
{
  vlib_buffer_t *b0;
  void *mb0;
  u32 total = 0, len;

  mb0 = memif_get_buffer (mif, ring, *head);
  ring->desc[*head].flags = 0;
  do
    {
      b0 = vlib_get_buffer (vm, bi);
      len = b0->current_length;
      if (PREDICT_FALSE (ring->desc[*head].buffer_length < (total + len)))
        {
          if (PREDICT_TRUE (total))
            {
              ring->desc[*head].length = total;
              total = 0;
              ring->desc[*head].flags |= MEMIF_DESC_FLAG_NEXT;
              *head = (*head + 1) & mask;
              mb0 = memif_get_buffer (mif, ring, *head);
              ring->desc[*head].flags = 0;
            }
        }
      if (PREDICT_TRUE (ring->desc[*head].buffer_length >= (total + len)))
        {
          clib_memcpy (mb0 + total, vlib_buffer_get_current (b0),
                       CLIB_CACHE_LINE_BYTES);
          if (len > CLIB_CACHE_LINE_BYTES)
            clib_memcpy (mb0 + CLIB_CACHE_LINE_BYTES + total,
                         vlib_buffer_get_current (b0) + CLIB_CACHE_LINE_BYTES,
                         len - CLIB_CACHE_LINE_BYTES);
          total += len;
        }
      else
        {
          vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_TRUNC_PACKET,
                            1);
          break;
        }
    }
  while ((bi = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) ? b0->next_buffer : 0));

  if (PREDICT_TRUE (total))
    {
      ring->desc[*head].length = total;
      *head = (*head + 1) & mask;
    }
}

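/* Per-frame TX: pick a tx queue for this thread (taking the interface lock
 * when queues are shared), copy each packet into the shared-memory ring,
 * publish the new head, and optionally signal the peer through the queue's
 * interrupt file descriptor. */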
static_always_inline uword
memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame, memif_if_t * mif,
                           memif_ring_type_t type)
{
  u8 qid;
  memif_ring_t *ring;
  u32 *buffers = vlib_frame_args (frame);
  u32 n_left = frame->n_vectors;
  u16 ring_size, mask;
  u16 head, tail;
  u16 free_slots;
  u32 thread_index = vlib_get_thread_index ();
  u8 tx_queues = vec_len (mif->tx_queues);
  memif_queue_t *mq;

  if (tx_queues < vec_len (vlib_mains))
    {
      qid = thread_index % tx_queues;
      clib_spinlock_lock_if_init (&mif->lockp);
    }
  else
    {
      qid = thread_index;
    }
  mq = vec_elt_at_index (mif->tx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  /* free consumed buffers */

  head = ring->head;
  tail = ring->tail;

  if (tail > head)
    free_slots = tail - head;
  else
    free_slots = ring_size - head + tail;

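  /* Main copy loop: prefetch shared-memory buffers and descriptors a few
   * slots ahead, prefetch the next two vlib buffers, and copy two packets
   * per iteration. */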
  while (n_left > 5 && free_slots > 1)
    {
      if (PREDICT_TRUE (head + 5 < ring_size))
        {
          CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 2),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 3),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[head + 4], CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[head + 5], CLIB_CACHE_LINE_BYTES, STORE);
        }
      else
        {
          CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 2) % mask),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 3) % mask),
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[(head + 4) % mask],
                         CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&ring->desc[(head + 5) % mask],
                         CLIB_CACHE_LINE_BYTES, STORE);
        }

      memif_prefetch_buffer_and_data (vm, buffers[2]);
      memif_prefetch_buffer_and_data (vm, buffers[3]);

      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[0], ring, &head,
                                    mask);
      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[1], ring, &head,
                                    mask);

      buffers += 2;
      n_left -= 2;
      free_slots -= 2;
    }

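  /* Copy any remaining packets one at a time until the frame or the ring
   * runs out. */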
  while (n_left && free_slots)
    {
      memif_copy_buffer_to_tx_ring (vm, node, mif, buffers[0], ring, &head,
                                    mask);
      buffers++;
      n_left--;
      free_slots--;
    }

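  /* Make sure descriptor and data stores are visible to the peer before the
   * new head is published. */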
  CLIB_MEMORY_STORE_BARRIER ();
  ring->head = head;

  clib_spinlock_unlock_if_init (&mif->lockp);

  if (n_left)
    {
      vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
                        n_left);
    }

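  /* All packet data has been copied into the shared ring, so the vlib
   * buffers can be freed. If the peer has not masked interrupts, kick it
   * by writing to the queue's interrupt file descriptor. */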
  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
  if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
    {
      u64 b = 1;
      CLIB_UNUSED (int r) = write (mq->int_fd, &b, sizeof (b));
      mq->int_count++;
    }

  return frame->n_vectors;
}

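/* Device-class tx function: a slave writes to the slave-to-master (S2M)
 * ring, a master writes to the master-to-slave (M2S) ring. */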
static uword
memif_interface_tx (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  memif_main_t *nm = &memif_main;
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  memif_if_t *mif = pool_elt_at_index (nm->interfaces, rund->dev_instance);

  if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
    return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_S2M);
  else
    return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_M2S);
}

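/* Redirect packets delivered by memif-input to an arbitrary graph node, or
 * restore the default next node when node_index is ~0. */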
static void
memif_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                               u32 node_index)
{
  memif_main_t *apm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (apm->interfaces, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      mif->per_interface_next_index = node_index;
      return;
    }

  mif->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), memif_input_node.index, node_index);
}

static void
memif_clear_hw_interface_counters (u32 instance)
{
  /* Nothing for now */
}

static clib_error_t *
memif_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                vnet_hw_interface_rx_mode mode)
{
  memif_main_t *mm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);

  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
  else
    mq->ring->flags &= ~MEMIF_RING_FLAG_MASK_INT;

  return 0;
}

static clib_error_t *
memif_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  memif_main_t *mm = &memif_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  static clib_error_t *error = 0;

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
  else
    mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;

  return error;
}

static clib_error_t *
memif_subif_add_del_function (vnet_main_t * vnm,
                              u32 hw_if_index,
                              struct vnet_sw_interface_t *st, int is_add)
{
  /* Nothing for now */
  return 0;
}

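/* Register the memif device class: the tx function, format helpers, error
 * strings and the callbacks defined above. */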
/* *INDENT-OFF* */
VNET_DEVICE_CLASS (memif_device_class) = {
  .name = "memif",
  .tx_function = memif_interface_tx,
  .format_device_name = format_memif_device_name,
  .format_device = format_memif_device,
  .format_tx_trace = format_memif_tx_trace,
  .tx_function_n_errors = MEMIF_TX_N_ERROR,
  .tx_function_error_strings = memif_tx_func_error_strings,
  .rx_redirect_to_node = memif_set_interface_next_node,
  .clear_counters = memif_clear_hw_interface_counters,
  .admin_up_down_function = memif_interface_admin_up_down,
  .subif_add_del_function = memif_subif_add_del_function,
  .rx_mode_change_function = memif_interface_rx_mode_change,
};

VLIB_DEVICE_TX_FUNCTION_MULTIARCH (memif_device_class, memif_interface_tx);
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */