FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
buffer.c
/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <unistd.h>
#include <errno.h>

#include <rte_config.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_vfio.h>

#include <vlib/vlib.h>
#include <dpdk/buffer.h>

STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
	       "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");

extern struct rte_mbuf *dpdk_mbuf_template_by_pool_index;
#ifndef CLIB_MARCH_VARIANT
struct rte_mempool **dpdk_mempool_by_buffer_pool_index = 0;
struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index = 0;
struct rte_mbuf *dpdk_mbuf_template_by_pool_index = 0;

clib_error_t *
dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
{
  uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
  struct rte_mempool *mp, *nmp;
  struct rte_pktmbuf_pool_private priv;
  enum rte_iova_mode iova_mode;
  u32 *bi;
  u8 *name = 0;

  u32 elt_size =
    sizeof (struct rte_mbuf) + sizeof (vlib_buffer_t) + bp->data_size;

  /* create empty mempools */
  vec_validate_aligned (dpdk_mempool_by_buffer_pool_index, bp->index,
			CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (dpdk_no_cache_mempool_by_buffer_pool_index,
			bp->index, CLIB_CACHE_LINE_BYTES);

  /* normal mempool */
  name = format (name, "vpp pool %u%c", bp->index, 0);
  mp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
				 elt_size, 512, sizeof (priv),
				 bp->numa_node, 0);
  if (!mp)
    {
      vec_free (name);
      return clib_error_return (0,
				"failed to create normal mempool for numa node %u",
				bp->numa_node);
    }
  vec_reset_length (name);

  /* non-cached mempool */
  name = format (name, "vpp pool %u (no cache)%c", bp->index, 0);
  nmp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
				  elt_size, 0, sizeof (priv),
				  bp->numa_node, 0);
  if (!nmp)
    {
      rte_mempool_free (mp);
      vec_free (name);
      return clib_error_return (0,
				"failed to create non-cache mempool for numa node %u",
				bp->numa_node);
    }
  vec_free (name);

  dpdk_mempool_by_buffer_pool_index[bp->index] = mp;
  dpdk_no_cache_mempool_by_buffer_pool_index[bp->index] = nmp;

  mp->pool_id = nmp->pool_id = bp->index;

  rte_mempool_set_ops_byname (mp, "vpp", NULL);
  rte_mempool_set_ops_byname (nmp, "vpp-no-cache", NULL);

  /* Call the mempool priv initializer */
  memset (&priv, 0, sizeof (priv));
  priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
    vlib_buffer_get_default_data_size (vm);
  priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
  rte_pktmbuf_pool_init (mp, &priv);
  rte_pktmbuf_pool_init (nmp, &priv);

  iova_mode = rte_eal_iova_mode ();

  /* populate mempool object buffer header */
  /* *INDENT-OFF* */
  vec_foreach (bi, bp->buffers)
    {
      struct rte_mempool_objhdr *hdr;
      vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
      struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
      hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
      hdr->mp = mp;
      hdr->iova = (iova_mode == RTE_IOVA_VA) ?
	pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
      STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
      STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
      mp->populated_size++;
      nmp->populated_size++;
    }
  /* *INDENT-ON* */

  /* call the object initializers */
  rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);

  /* create mbuf header template from the first buffer in the pool */
  vec_validate_aligned (dpdk_mbuf_template_by_pool_index, bp->index,
			CLIB_CACHE_LINE_BYTES);
  clib_memcpy (vec_elt_at_index (dpdk_mbuf_template_by_pool_index, bp->index),
	       rte_mbuf_from_vlib_buffer (vlib_buffer_ptr_from_index
					  (buffer_mem_start, *bp->buffers,
					   0)), sizeof (struct rte_mbuf));

  /* *INDENT-OFF* */
  vec_foreach (bi, bp->buffers)
    {
      vlib_buffer_t *b;
      b = vlib_buffer_ptr_from_index (buffer_mem_start, *bi, 0);
      vlib_buffer_copy_template (b, &bp->buffer_template);
    }
  /* *INDENT-ON* */

  /* map DMA pages if at least one physical device exists */
  if (rte_eth_dev_count_avail ())
    {
      uword i;
      size_t page_sz;
      vlib_physmem_map_t *pm;
      int do_vfio_map = 1;

      pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
      page_sz = 1ULL << pm->log2_page_size;

      for (i = 0; i < pm->n_pages; i++)
	{
	  char *va = ((char *) pm->base) + i * page_sz;
	  uword pa = (iova_mode == RTE_IOVA_VA) ?
	    pointer_to_uword (va) : pm->page_table[i];

	  if (do_vfio_map &&
	      rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
	    do_vfio_map = 0;

	  struct rte_mempool_memhdr *memhdr;
	  memhdr = clib_mem_alloc (sizeof (*memhdr));
	  memhdr->mp = mp;
	  memhdr->addr = va;
	  memhdr->iova = pa;
	  memhdr->len = page_sz;
	  memhdr->free_cb = 0;
	  memhdr->opaque = 0;

	  STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next);
	  mp->nb_mem_chunks++;
	}
    }

  return 0;
}

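/* DPDK mempool alloc/free ops are intentionally no-ops: element memory is
 * allocated and populated by dpdk_buffer_pool_init() above, so these
 * callbacks only leave a warning trace if DPDK ever invokes them. */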
static int
dpdk_ops_vpp_alloc (struct rte_mempool *mp)
{
  clib_warning ("");
  return 0;
}

static void
dpdk_ops_vpp_free (struct rte_mempool *mp)
{
  clib_warning ("");
}

#endif

static_always_inline void
dpdk_ops_vpp_enqueue_one (vlib_buffer_t * bt, void *obj)
{
  /* Only non-replicated packets (b->ref_count == 1) expected */

  struct rte_mbuf *mb = obj;
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  ASSERT (b->ref_count == 1);
  ASSERT (b->buffer_pool_index == bt->buffer_pool_index);
  vlib_buffer_copy_template (b, bt);
}

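/* enqueue op for the cached "vpp" mempool: first pass resets each returned
 * mbuf's vlib metadata from the pool's buffer template, second pass
 * translates the mbuf pointers to buffer indices in batches of 32 and hands
 * them back to the VPP buffer pool. */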
int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue) (struct rte_mempool * mp,
					  void *const *obj_table, unsigned n)
{
  const int batch_size = 32;
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_t bt;
  u8 buffer_pool_index = mp->pool_id;
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  u32 bufs[batch_size];
  u32 n_left = n;
  void *const *obj = obj_table;

  vlib_buffer_copy_template (&bt, &bp->buffer_template);

  while (n_left >= 4)
    {
      dpdk_ops_vpp_enqueue_one (&bt, obj[0]);
      dpdk_ops_vpp_enqueue_one (&bt, obj[1]);
      dpdk_ops_vpp_enqueue_one (&bt, obj[2]);
      dpdk_ops_vpp_enqueue_one (&bt, obj[3]);
      obj += 4;
      n_left -= 4;
    }

  while (n_left)
    {
      dpdk_ops_vpp_enqueue_one (&bt, obj[0]);
      obj += 1;
      n_left -= 1;
    }

  while (n >= batch_size)
    {
      vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
					   batch_size,
					   sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);
      n -= batch_size;
      obj_table += batch_size;
    }

  if (n)
    {
      vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
					   n, sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n);
    }

  return 0;
}

CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue);

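/* per-object helper for the no-cache enqueue path: drops one reference and
 * returns the buffer to its VPP pool only when the reference count reaches
 * zero. */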
static_always_inline void
dpdk_ops_vpp_enqueue_no_cache_one (vlib_main_t * vm, struct rte_mempool *old,
				   struct rte_mempool *new, void *obj,
				   vlib_buffer_t * bt)
{
  struct rte_mbuf *mb = obj;
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);

  if (clib_atomic_sub_fetch (&b->ref_count, 1) == 0)
    {
      u32 bi = vlib_get_buffer_index (vm, b);
      vlib_buffer_copy_template (b, bt);
      vlib_buffer_pool_put (vm, bt->buffer_pool_index, &bi, 1);
      return;
    }
}

int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue_no_cache) (struct rte_mempool * cmp,
						   void *const *obj_table,
						   unsigned n)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_t bt;
  struct rte_mempool *mp;
  mp = dpdk_mempool_by_buffer_pool_index[cmp->pool_id];
  u8 buffer_pool_index = cmp->pool_id;
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  vlib_buffer_copy_template (&bt, &bp->buffer_template);

  while (n >= 4)
    {
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt);
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[1], &bt);
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[2], &bt);
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[3], &bt);
      obj_table += 4;
      n -= 4;
    }

  while (n)
    {
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt);
      obj_table += 1;
      n -= 1;
    }

  return 0;
}

CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue_no_cache);

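/* reinitialize a batch of mbufs from the per-pool template; bytes 0 .. 15
 * (buf_addr, buf_iova) are per-buffer state and are preserved, the
 * remaining 112 bytes are bulk-copied with 16- or 32-byte vector stores. */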
static_always_inline void
dpdk_mbuf_init_from_template (struct rte_mbuf **mba, struct rte_mbuf *mt,
			      int count)
{
  /* Assumptions about rte_mbuf layout */
  STATIC_ASSERT_OFFSET_OF (struct rte_mbuf, buf_addr, 0);
  STATIC_ASSERT_OFFSET_OF (struct rte_mbuf, buf_iova, 8);
  STATIC_ASSERT_SIZEOF_ELT (struct rte_mbuf, buf_iova, 8);
  STATIC_ASSERT_SIZEOF (struct rte_mbuf, 128);

  while (count--)
    {
      struct rte_mbuf *mb = mba[0];
      int i;
      /* bytes 0 .. 15 hold buf_addr and buf_iova which we need to preserve */
      /* copy bytes 16 .. 31 */
      *((u8x16 *) mb + 1) = *((u8x16 *) mt + 1);

      /* copy bytes 32 .. 127 */
#ifdef CLIB_HAVE_VEC256
      for (i = 1; i < 4; i++)
	*((u8x32 *) mb + i) = *((u8x32 *) mt + i);
#else
      for (i = 2; i < 8; i++)
	*((u8x16 *) mb + i) = *((u8x16 *) mt + i);
#endif
      mba++;
    }
}

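/* dequeue op for the cached "vpp" mempool: allocates buffers from the VPP
 * buffer pool in batches of 32, converts the indices to mbuf pointers and
 * stamps each mbuf from the pool template. DPDK expects all-or-nothing
 * dequeue semantics, so a partial allocation is fully rolled back and
 * -ENOENT returned. */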
int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_dequeue) (struct rte_mempool * mp,
					  void **obj_table, unsigned n)
{
  const int batch_size = 32;
  vlib_main_t *vm = vlib_get_main ();
  u32 bufs[batch_size], total = 0, n_alloc = 0;
  u8 buffer_pool_index = mp->pool_id;
  void **obj = obj_table;
  struct rte_mbuf t = dpdk_mbuf_template_by_pool_index[buffer_pool_index];

  while (n >= batch_size)
    {
      n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, batch_size,
					     buffer_pool_index);
      if (n_alloc != batch_size)
	goto alloc_fail;

      vlib_get_buffers_with_offset (vm, bufs, obj, batch_size,
				    -(i32) sizeof (struct rte_mbuf));
      dpdk_mbuf_init_from_template ((struct rte_mbuf **) obj, &t, batch_size);
      total += batch_size;
      obj += batch_size;
      n -= batch_size;
    }

  if (n)
    {
      n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, n, buffer_pool_index);

      if (n_alloc != n)
	goto alloc_fail;

      vlib_get_buffers_with_offset (vm, bufs, obj, n,
				    -(i32) sizeof (struct rte_mbuf));
      dpdk_mbuf_init_from_template ((struct rte_mbuf **) obj, &t, n);
    }

  return 0;

alloc_fail:
  /* dpdk doesn't support partial alloc, so we need to return what we
     already got */
  if (n_alloc)
    vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n_alloc);
  obj = obj_table;
  while (total)
    {
      vlib_get_buffer_indices_with_offset (vm, obj, bufs, batch_size,
					   sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);

      obj += batch_size;
      total -= batch_size;
    }
  return -ENOENT;
}

CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_dequeue);

#ifndef CLIB_MARCH_VARIANT

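/* dequeue on the no-cache pool is never expected to happen; buffers are
 * only returned through it, so reaching this path is a hard error */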
static int
dpdk_ops_vpp_dequeue_no_cache (struct rte_mempool *mp, void **obj_table,
			       unsigned n)
{
  clib_error ("bug");
  return 0;
}

static unsigned
dpdk_ops_vpp_get_count (const struct rte_mempool *mp)
{
  clib_warning ("");
  return 0;
}

static unsigned
dpdk_ops_vpp_get_count_no_cache (const struct rte_mempool *mp)
{
  struct rte_mempool *cmp;
  cmp = dpdk_no_cache_mempool_by_buffer_pool_index[mp->pool_id];
  return dpdk_ops_vpp_get_count (cmp);
}

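/* register the "vpp" and "vpp-no-cache" mempool ops with DPDK, then
 * initialize a DPDK mempool pair for every started VPP buffer pool.
 * CLIB_MARCH_FN_POINTER picks the best multiarch variant of the
 * enqueue/dequeue handlers for the running CPU. */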
clib_error_t *
dpdk_buffer_pools_create (vlib_main_t * vm)
{
  clib_error_t *err;
  vlib_buffer_pool_t *bp;

  struct rte_mempool_ops ops = { };

  strncpy (ops.name, "vpp", 4);
  ops.alloc = dpdk_ops_vpp_alloc;
  ops.free = dpdk_ops_vpp_free;
  ops.get_count = dpdk_ops_vpp_get_count;
  ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue);
  ops.dequeue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_dequeue);
  rte_mempool_register_ops (&ops);

  strncpy (ops.name, "vpp-no-cache", 13);
  ops.get_count = dpdk_ops_vpp_get_count_no_cache;
  ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue_no_cache);
  ops.dequeue = dpdk_ops_vpp_dequeue_no_cache;
  rte_mempool_register_ops (&ops);

  /* *INDENT-OFF* */
  vec_foreach (bp, vm->buffer_main->buffer_pools)
    if (bp->start && (err = dpdk_buffer_pool_init (vm, bp)))
      return err;
  /* *INDENT-ON* */
  return 0;
}

VLIB_BUFFER_SET_EXT_HDR_SIZE (sizeof (struct rte_mempool_objhdr) +
			      sizeof (struct rte_mbuf));

#endif

/** @endcond */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */