FD.io VPP  v21.01.1
Vector Packet Processing
cryptodev_dp_api.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2020 Intel and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/vnet.h>
22 #include <vpp/app/version.h>
23 
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
27 #undef always_inline
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_cryptodev_pmd.h>
33 #include <rte_config.h>
34 
35 #if CLIB_DEBUG > 0
36 #define always_inline static inline
37 #else
38 #define always_inline static inline __attribute__ ((__always_inline__))
39 #endif
40 
41 #define CRYPTODEV_NB_CRYPTO_OPS 1024
42 #define CRYPTODEV_MAX_INFLIGHT (CRYPTODEV_NB_CRYPTO_OPS - 1)
43 #define CRYPTODEV_AAD_MASK (CRYPTODEV_NB_CRYPTO_OPS - 1)
44 #define CRYPTODEV_DEQ_CACHE_SZ 32
45 #define CRYPTODEV_NB_SESSION 10240
46 #define CRYPTODEV_MAX_AAD_SIZE 16
47 #define CRYPTODEV_MAX_N_SGL 8 /**< maximum number of segments */
48 
49 /* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */
50 #define foreach_vnet_aead_crypto_conversion \
51  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8) \
52  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
53  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8) \
54  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
55  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8) \
56  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)
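Each _() row is an X-macro entry; the #define _ in force at each expansion site selects which columns are used. As an illustration (a sketch of the preprocessor output, not part of the file), the row _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8) under the #define _(a, b, c, d, e, f) used in cryptodev_check_supported_vnet_alg below expands to:

  if (alg == VNET_CRYPTO_ALG_AES_128_GCM)
    return 0;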
57 
58 /**
59  * crypto (alg, cryptodev_alg), hash (alg, digest-size)
60  **/
61 #define foreach_cryptodev_link_async_alg \
62  _ (AES_128_CBC, AES_CBC, SHA1, 12) \
63  _ (AES_192_CBC, AES_CBC, SHA1, 12) \
64  _ (AES_256_CBC, AES_CBC, SHA1, 12) \
65  _ (AES_128_CBC, AES_CBC, SHA224, 14) \
66  _ (AES_192_CBC, AES_CBC, SHA224, 14) \
67  _ (AES_256_CBC, AES_CBC, SHA224, 14) \
68  _ (AES_128_CBC, AES_CBC, SHA256, 16) \
69  _ (AES_192_CBC, AES_CBC, SHA256, 16) \
70  _ (AES_256_CBC, AES_CBC, SHA256, 16) \
71  _ (AES_128_CBC, AES_CBC, SHA384, 24) \
72  _ (AES_192_CBC, AES_CBC, SHA384, 24) \
73  _ (AES_256_CBC, AES_CBC, SHA384, 24) \
74  _ (AES_128_CBC, AES_CBC, SHA512, 32) \
75  _ (AES_192_CBC, AES_CBC, SHA512, 32) \
76  _ (AES_256_CBC, AES_CBC, SHA512, 32)
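Likewise for the linked (cipher + HMAC) table: under the #define _(a, b, c, d) inside prepare_linked_xform's switch below, the row _ (AES_128_CBC, AES_CBC, SHA1, 12) expands to roughly this case label (again a sketch of the preprocessor output):

  case VNET_CRYPTO_ALG_AES_128_CBC_SHA1_TAG12:
    cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC;
    auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
    digest_len = 12;
    break;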
77 
78 typedef enum
79 {
80   CRYPTODEV_OP_TYPE_ENCRYPT = 0,
81   CRYPTODEV_OP_TYPE_DECRYPT,
82   CRYPTODEV_N_OP_TYPES,
83 } cryptodev_op_type_t;
84 
85 typedef struct
86 {
87   struct rte_cryptodev_sym_session *keys[CRYPTODEV_N_OP_TYPES];
88 } cryptodev_key_t;
89 
90 typedef struct
91 {
92   u32 dev_id;
93   u32 q_id;
94   u8 *dp_service_buffer;
95   char *desc;
96 } cryptodev_inst_t;
97 
98 typedef struct
99 {
100   struct rte_mempool *sess_pool;
101   struct rte_mempool *sess_priv_pool;
102 } cryptodev_numa_data_t;
103 
104 typedef struct
105 {
106   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
107   vlib_buffer_t *b[VNET_CRYPTO_FRAME_SIZE];
108   struct rte_crypto_dp_service_ctx *dp_service;
109   struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
110   struct rte_ring *cached_frame;
111   u16 aad_index;
112   u8 *aad_buf;
113   u64 aad_phy_addr;
114   u16 cryptodev_id;
115   u16 cryptodev_q;
116   u16 inflight;
117 } cryptodev_engine_thread_t;
118 
119 typedef struct
120 {
121   cryptodev_numa_data_t *per_numa_data;
122   cryptodev_key_t *keys;
123   cryptodev_engine_thread_t *per_thread_data;
124   enum rte_iova_mode iova_mode;
125   cryptodev_inst_t *cryptodev_inst;
126   clib_bitmap_t *active_cdev_inst_mask;
127   clib_spinlock_t tlock;
128 } cryptodev_main_t;
129 
130 cryptodev_main_t cryptodev_main;
131 
132 static int
133 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
134  cryptodev_op_type_t op_type,
135  const vnet_crypto_key_t * key, u32 aad_len)
136 {
137  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
138  memset (xform, 0, sizeof (*xform));
139  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
140  xform->next = 0;
141 
142  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
143  key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
144  key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
145  return -1;
146 
147  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
148  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
149  RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
150  aead_xform->aad_length = aad_len;
151  aead_xform->digest_length = 16;
152  aead_xform->iv.offset = 0;
153  aead_xform->iv.length = 12;
154  aead_xform->key.data = key->data;
155  aead_xform->key.length = vec_len (key->data);
156 
157  return 0;
158 }
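A minimal caller sketch (illustrative only; key_index is a hypothetical vnet crypto key index):

  struct rte_crypto_sym_xform xform;
  vnet_crypto_key_t *key = vnet_crypto_get_key (key_index);
  /* on success, xform describes AES-GCM encrypt: 12-byte IV, 16-byte tag,
     8-byte AAD */
  if (prepare_aead_xform (&xform, CRYPTODEV_OP_TYPE_ENCRYPT, key, 8) < 0)
    return;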
159 
160 static int
161 prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
162  cryptodev_op_type_t op_type,
163  const vnet_crypto_key_t * key)
164 {
165  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
166  vnet_crypto_key_t *key_cipher, *key_auth;
167  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
168  enum rte_crypto_auth_algorithm auth_algo = ~0;
169  u32 digest_len = ~0;
170 
171  key_cipher = vnet_crypto_get_key (key->index_crypto);
172  key_auth = vnet_crypto_get_key (key->index_integ);
173  if (!key_cipher || !key_auth)
174  return -1;
175 
176  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
177  {
178  xform_cipher = xforms;
179  xform_auth = xforms + 1;
180  xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
181  xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
182  }
183  else
184  {
185  xform_cipher = xforms + 1;
186  xform_auth = xforms;
187  xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
188  xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
189  }
190 
191  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
192  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
193  xforms->next = xforms + 1;
194 
195  switch (key->async_alg)
196  {
197 #define _(a, b, c, d) \
198  case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:\
199  cipher_algo = RTE_CRYPTO_CIPHER_##b; \
200  auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
201  digest_len = d; \
202  break;
203 
204       foreach_cryptodev_link_async_alg
205 #undef _
206  default:
207  return -1;
208  }
209 
210  xform_cipher->cipher.algo = cipher_algo;
211  xform_cipher->cipher.key.data = key_cipher->data;
212  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
213  xform_cipher->cipher.iv.length = 16;
214  xform_cipher->cipher.iv.offset = 0;
215 
216  xform_auth->auth.algo = auth_algo;
217  xform_auth->auth.digest_length = digest_len;
218  xform_auth->auth.key.data = key_auth->data;
219  xform_auth->auth.key.length = vec_len (key_auth->data);
220 
221  return 0;
222 }
223 
224 static int
225 cryptodev_session_create (vnet_crypto_key_t * const key,
226                           struct rte_mempool *sess_priv_pool,
227                           cryptodev_key_t * session_pair, u32 aad_len)
228 {
229  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
230   struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
231   cryptodev_main_t *cmt = &cryptodev_main;
232   cryptodev_inst_t *dev_inst;
233  struct rte_cryptodev *cdev;
234  int ret;
235  uint8_t dev_id = 0;
236 
237  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
238  ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
239  else
240  ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
241  aad_len);
242  if (ret)
243  return 0;
244 
245   if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
246     prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
247   else
248  prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
249 
250  vec_foreach (dev_inst, cmt->cryptodev_inst)
251  {
252  dev_id = dev_inst->dev_id;
253  cdev = rte_cryptodev_pmd_get_dev (dev_id);
254 
255   /* if the session is already configured for the driver type, avoid
256      configuring it again, which would needlessly bump the session data's refcnt */
257  if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
258  session_pair->keys[1]->sess_data[cdev->driver_id].data)
259  continue;
260 
261  ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
262  xforms_enc, sess_priv_pool);
263   ret |= rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
264  xforms_dec, sess_priv_pool);
265  if (ret < 0)
266  return ret;
267  }
268  session_pair->keys[0]->opaque_data = aad_len;
269  session_pair->keys[1]->opaque_data = aad_len;
270 
271  return 0;
272 }
273 
274 static void
275 cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
276 {
277  u32 n_devs, i;
278 
279  if (sess == NULL)
280  return;
281 
282  n_devs = rte_cryptodev_count ();
283 
284  for (i = 0; i < n_devs; i++)
285  rte_cryptodev_sym_session_clear (i, sess);
286 
287  rte_cryptodev_sym_session_free (sess);
288 }
289 
290 static int
291 cryptodev_check_supported_vnet_alg (vnet_crypto_key_t * key)
292 {
293  vnet_crypto_alg_t alg;
294  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
295  return 0;
296 
297  alg = key->alg;
298 
299 #define _(a, b, c, d, e, f) \
300  if (alg == VNET_CRYPTO_ALG_##a) \
301  return 0;
302 
303   foreach_vnet_aead_crypto_conversion
304 #undef _
305  return -1;
306 }
307 
308 static_always_inline void
309 cryptodev_sess_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
310                         vnet_crypto_key_index_t idx, u32 aad_len)
311 {
312   cryptodev_main_t *cmt = &cryptodev_main;
313   cryptodev_numa_data_t *numa_data;
314   vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
315   struct rte_mempool *sess_pool, *sess_priv_pool;
316  cryptodev_key_t *ckey = 0;
317  int ret = 0;
318 
319  if (kop == VNET_CRYPTO_KEY_OP_DEL)
320  {
321  if (idx >= vec_len (cmt->keys))
322  return;
323 
324  ckey = pool_elt_at_index (cmt->keys, idx);
325  cryptodev_session_del (ckey->keys[0]);
326  cryptodev_session_del (ckey->keys[1]);
327  ckey->keys[0] = 0;
328  ckey->keys[1] = 0;
329  pool_put (cmt->keys, ckey);
330  return;
331  }
332  else if (kop == VNET_CRYPTO_KEY_OP_MODIFY)
333  {
334  if (idx >= vec_len (cmt->keys))
335  return;
336 
337  ckey = pool_elt_at_index (cmt->keys, idx);
338 
339  cryptodev_session_del (ckey->keys[0]);
340  cryptodev_session_del (ckey->keys[1]);
341  ckey->keys[0] = 0;
342  ckey->keys[1] = 0;
343  }
344  else /* create key */
345  pool_get_zero (cmt->keys, ckey);
346 
347   /* do not create session for unsupported alg */
348   if (cryptodev_check_supported_vnet_alg (key))
349     return;
350 
351  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
352  sess_pool = numa_data->sess_pool;
353  sess_priv_pool = numa_data->sess_priv_pool;
354 
355  ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
356  if (!ckey->keys[0])
357  {
358  ret = -1;
359  goto clear_key;
360  }
361 
362  ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
363  if (!ckey->keys[1])
364  {
365  ret = -1;
366  goto clear_key;
367  }
368 
369  ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);
370 
371 clear_key:
372  if (ret != 0)
373  {
374  cryptodev_session_del (ckey->keys[0]);
375  cryptodev_session_del (ckey->keys[1]);
376  memset (ckey, 0, sizeof (*ckey));
377  pool_put (cmt->keys, ckey);
378  }
379 }
380 
381 /*static*/ void
382 cryptodev_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
383                        vnet_crypto_key_index_t idx)
384 {
385  cryptodev_sess_handler (vm, kop, idx, 8);
386 }
387 
388 static_always_inline void
389 cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t * f,
390                                  vnet_crypto_op_status_t s)
391 {
392  u32 n_elts = f->n_elts, i;
393 
394  for (i = 0; i < n_elts; i++)
395     f->elts[i].status = s;
396   f->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
397 }
398 
399 static_always_inline int
400 cryptodev_frame_build_sgl (vlib_main_t * vm, enum rte_iova_mode iova_mode,
401  struct rte_crypto_vec *data_vec,
402  u16 * n_seg, vlib_buffer_t * b, u32 size)
403 {
404   struct rte_crypto_vec *vec = data_vec + 1;
405   if (vlib_buffer_chain_linearize (vm, b) > CRYPTODEV_MAX_N_SGL)
406     return -1;
407 
408  while ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && size)
409  {
410  u32 len;
411  b = vlib_get_buffer (vm, b->next_buffer);
412  len = clib_min (b->current_length, size);
413  vec->base = (void *) vlib_buffer_get_current (b);
414  if (iova_mode == RTE_IOVA_VA)
415  vec->iova = pointer_to_uword (vec->base);
416  else
417  vec->iova = vlib_buffer_get_current_pa (vm, b);
418  vec->len = len;
419  size -= len;
420  vec++;
421  *n_seg += 1;
422  }
423 
424  if (size)
425  return -1;
426 
427  return 0;
428 }
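Usage sketch (mirroring the enqueue paths below, which own the names used here): the caller fills data_vec[0] with the head buffer's region, and this helper appends one rte_crypto_vec entry per chained segment, failing if the chain needs more than CRYPTODEV_MAX_N_SGL entries or cannot cover the requested size:

  u16 n_seg = 1;
  /* vec[0] already covers the head buffer; add the chained segments */
  if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
                                 max_end - min_ofs - vec->len) < 0)
    /* chain too long, or shorter than the requested size */ ;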
429 
430 static_always_inline u64
431 compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t * fe, i16 * min_ofs,
432                         u32 * max_end)
433 {
434  union rte_crypto_sym_ofs ofs;
435  u32 crypto_end = fe->crypto_start_offset + fe->crypto_total_length;
436  u32 integ_end = fe->integ_start_offset + fe->crypto_total_length +
437  fe->integ_length_adj;
438 
439  *min_ofs = clib_min (fe->crypto_start_offset, fe->integ_start_offset);
440  *max_end = clib_max (crypto_end, integ_end);
441 
442  ofs.ofs.cipher.head = fe->crypto_start_offset - *min_ofs;
443  ofs.ofs.cipher.tail = *max_end - crypto_end;
444  ofs.ofs.auth.head = fe->integ_start_offset - *min_ofs;
445  ofs.ofs.auth.tail = *max_end - integ_end;
446 
447  return ofs.raw;
448 }
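A worked example with illustrative numbers: for an ESP-like frame with crypto_start_offset = 16, crypto_total_length = 64, integ_start_offset = 8 and integ_length_adj = 12, we get crypto_end = 80 and integ_end = 84, so *min_ofs = 8 and *max_end = 84, giving cipher.head = 8, cipher.tail = 4, auth.head = 0 and auth.tail = 0 — one contiguous [8, 84) data region with per-op head/tail trims encoded in the returned rte_crypto_sym_ofs.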
449 
450 static_always_inline int
451 cryptodev_frame_linked_algs_enqueue (vlib_main_t * vm,
452                                      vnet_crypto_async_frame_t * frame,
453                                      cryptodev_op_type_t op_type)
454 {
455   cryptodev_main_t *cmt = &cryptodev_main;
456   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
457   vnet_crypto_async_frame_elt_t *fe;
458   struct rte_crypto_vec *vec;
459  struct rte_crypto_data iv_vec, digest_vec;
460  vlib_buffer_t **b;
461   u32 n_elts;
462   cryptodev_key_t *key;
463   u32 last_key_index;
464  union rte_crypto_sym_ofs cofs;
465  i16 min_ofs;
466  u32 max_end;
467 
468  n_elts = frame->n_elts;
469 
470  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
471     {
472       cryptodev_mark_frame_err_status (frame,
473                                        VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
474  return -1;
475  }
476 
477  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);
478 
479  vec = cet->vec;
480  b = cet->b;
481 
482  fe = frame->elts;
483 
484  key = pool_elt_at_index (cmt->keys, fe->key_index);
485  last_key_index = fe->key_index;
486 
487  if (PREDICT_FALSE
488  (rte_cryptodev_dp_configure_service
489  (cet->cryptodev_id, cet->cryptodev_q, RTE_CRYPTO_DP_SYM_CHAIN,
490  RTE_CRYPTO_OP_WITH_SESSION,
491  (union rte_cryptodev_session_ctx) key->keys[op_type], cet->dp_service,
492  0) < 0))
493     {
494       cryptodev_mark_frame_err_status (frame,
495                                        VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
496  return -1;
497  }
498 
499  cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);
500 
501  while (n_elts)
502  {
503  u16 n_seg = 1;
504  int status;
505 
506  if (n_elts > 2)
507  {
508  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
509  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
510  vlib_prefetch_buffer_header (b[1], LOAD);
511  vlib_prefetch_buffer_header (b[2], LOAD);
512  }
513 
514  if (PREDICT_FALSE (last_key_index != fe->key_index))
515  {
516  cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);
517 
518  key = pool_elt_at_index (cmt->keys, fe->key_index);
519  last_key_index = fe->key_index;
520 
521  if (PREDICT_FALSE
522  (rte_cryptodev_dp_configure_service
523  (cet->cryptodev_id, cet->cryptodev_q, RTE_CRYPTO_DP_SYM_CHAIN,
524  RTE_CRYPTO_OP_WITH_SESSION,
525  (union rte_cryptodev_session_ctx) key->keys[op_type],
526  cet->dp_service, 1) < 0))
527             {
528               cryptodev_mark_frame_err_status (frame,
529                                                VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
530  return -1;
531  }
532  }
533 
534  vec->len = max_end - min_ofs;
535  if (cmt->iova_mode == RTE_IOVA_VA)
536  {
537  vec->base = (void *) (b[0]->data + min_ofs);
538  vec->iova = pointer_to_uword (b[0]->data) + min_ofs;
539  iv_vec.base = (void *) fe->iv;
540  iv_vec.iova = pointer_to_uword (fe->iv);
541  digest_vec.base = (void *) fe->tag;
542  digest_vec.iova = pointer_to_uword (fe->tag);
543  }
544  else
545  {
546  vec->base = (void *) (b[0]->data + min_ofs);
547  vec->iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
548  iv_vec.base = (void *) fe->iv;
549  iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
550  digest_vec.base = (void *) fe->tag;
551           digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
552  }
553 
554       if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
555         {
556           vec->len = b[0]->current_data + b[0]->current_length - min_ofs;
557           if (cryptodev_frame_build_sgl
558               (vm, cmt->iova_mode, vec, &n_seg, b[0],
559  max_end - min_ofs - vec->len) < 0)
560             {
561               cryptodev_mark_frame_err_status (frame,
562                                                VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
563  return -1;
564  }
565  }
566 
567  status = rte_cryptodev_dp_submit_single_job (cet->dp_service,
568  vec, n_seg, cofs, &iv_vec,
569  &digest_vec, 0,
570  (void *) frame);
571  if (status < 0)
572         {
573           cryptodev_mark_frame_err_status (frame,
574                                            VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
575  return -1;
576  }
577 
578  b++;
579  fe++;
580  n_elts--;
581  }
582 
583  rte_cryptodev_dp_submit_done (cet->dp_service, frame->n_elts);
584  cet->inflight += frame->n_elts;
585 
586  return 0;
587 }
588 
589 static_always_inline int
590 cryptodev_frame_gcm_enqueue (vlib_main_t * vm,
591                              vnet_crypto_async_frame_t * frame,
592                              cryptodev_op_type_t op_type, u8 aad_len)
593 {
594   cryptodev_main_t *cmt = &cryptodev_main;
595   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
596   vnet_crypto_async_frame_elt_t *fe;
597   vlib_buffer_t **b;
598   u32 n_elts;
599   cryptodev_key_t *key;
600   u32 last_key_index;
601  union rte_crypto_sym_ofs cofs;
602  struct rte_crypto_vec *vec;
603  struct rte_crypto_data iv_vec, digest_vec, aad_vec;
604  u8 sess_aad_len;
605 
606  n_elts = frame->n_elts;
607 
608  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
609     {
610       cryptodev_mark_frame_err_status (frame,
611                                        VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
612  return -1;
613  }
614 
615  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);
616 
617  vec = cet->vec;
618  fe = frame->elts;
619  b = cet->b;
620 
621  cofs.raw = 0;
622 
623  key = pool_elt_at_index (cmt->keys, fe->key_index);
624  last_key_index = fe->key_index;
625  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
626   if (PREDICT_FALSE (sess_aad_len != aad_len))
627     cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
628                             fe->key_index, aad_len);
629 
630  if (PREDICT_FALSE
631  (rte_cryptodev_dp_configure_service
632  (cet->cryptodev_id, cet->cryptodev_q, RTE_CRYPTO_DP_SYM_AEAD,
633  RTE_CRYPTO_OP_WITH_SESSION,
634  (union rte_cryptodev_session_ctx) key->keys[op_type], cet->dp_service,
635  0) < 0))
636     {
637       cryptodev_mark_frame_err_status (frame,
638                                        VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
639  return -1;
640  }
641 
642  while (n_elts)
643  {
644  u32 aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
645  int status;
646  u16 n_seg = 1;
647 
648  if (n_elts > 1)
649  {
650  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
651  vlib_prefetch_buffer_header (b[1], LOAD);
652  }
653 
654  if (last_key_index != fe->key_index)
655  {
656  key = pool_elt_at_index (cmt->keys, fe->key_index);
657  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
658  if (PREDICT_FALSE (sess_aad_len != aad_len))
659             {
660               cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
661                                       fe->key_index, aad_len);
662  }
663  last_key_index = fe->key_index;
664 
665  if (PREDICT_FALSE
666  (rte_cryptodev_dp_configure_service
667  (cet->cryptodev_id, cet->cryptodev_q, RTE_CRYPTO_DP_SYM_AEAD,
668  RTE_CRYPTO_OP_WITH_SESSION,
669  (union rte_cryptodev_session_ctx) key->keys[op_type],
670  cet->dp_service, 1) < 0))
671                 {
672                   cryptodev_mark_frame_err_status (frame,
673                                                    VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
674  return -1;
675  }
676  }
677 
678  if (cmt->iova_mode == RTE_IOVA_VA)
679  {
680  vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
681  vec[0].iova = pointer_to_uword (vec[0].base);
682  vec[0].len = fe->crypto_total_length;
683  iv_vec.base = (void *) fe->iv;
684  iv_vec.iova = pointer_to_uword (fe->iv);
685  digest_vec.base = (void *) fe->tag;
686  digest_vec.iova = pointer_to_uword (fe->tag);
687  aad_vec.base = (void *) (cet->aad_buf + aad_offset);
688  aad_vec.iova = cet->aad_phy_addr + aad_offset;
689  }
690  else
691  {
692  vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
693  vec[0].iova =
694  vlib_buffer_get_pa (vm, b[0]) + fe->crypto_start_offset;
695  vec[0].len = fe->crypto_total_length;
696  iv_vec.base = (void *) fe->iv;
697  iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
698  aad_vec.base = (void *) (cet->aad_buf + aad_offset);
699  aad_vec.iova = cet->aad_phy_addr + aad_offset;
700  digest_vec.base = (void *) fe->tag;
701  digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
702  }
703 
704  if (aad_len == 8)
705  *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
706  else
707  {
708  /* aad_len == 12 */
709  *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
710  *(u32 *) (cet->aad_buf + aad_offset + 8) = *(u32 *) (fe->aad + 8);
711  }
712 
713       if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
714         {
715           vec[0].len = b[0]->current_data +
716             b[0]->current_length - fe->crypto_start_offset;
717           if (cryptodev_frame_build_sgl
718               (vm, cmt->iova_mode, vec, &n_seg, b[0],
719  fe->crypto_total_length - vec[0].len) < 0)
720             {
721               cryptodev_mark_frame_err_status (frame,
722                                                VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
723  return -1;
724  }
725  }
726 
727  status =
728  rte_cryptodev_dp_submit_single_job (cet->dp_service, vec, n_seg, cofs,
729  &iv_vec, &digest_vec, &aad_vec,
730  (void *) frame);
731  if (PREDICT_FALSE (status < 0))
732     {
733       cryptodev_mark_frame_err_status (frame,
734                                        VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
735  return -1;
736  }
737  fe++;
738  b++;
739  n_elts--;
740  }
741 
742  rte_cryptodev_dp_submit_done (cet->dp_service, frame->n_elts);
743  cet->inflight += frame->n_elts;
744 
745  return 0;
746 }
747 
748 static u32
749 cryptodev_get_frame_n_elts (void *frame)
750 {
751   vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;
752   return f->n_elts;
753 }
754 
755 static void
756 cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
757 {
758   vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;
759 
760  f->elts[index].status = is_op_success ? VNET_CRYPTO_OP_STATUS_COMPLETED :
761  VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
762 }
763 
764 #define GET_RING_OBJ(r, pos, f) do { \
765  vnet_crypto_async_frame_t **ring = (void *)&r[1]; \
766  f = ring[(r->cons.head + pos) & r->mask]; \
767 } while (0)
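GET_RING_OBJ peeks at the pos-th object counted from the ring's consumer head without dequeuing it: &r[1] is where rte_ring keeps its object array. A sketch (assuming the ring stores vnet_crypto_async_frame_t pointers, as cached_frame does):

  vnet_crypto_async_frame_t *f;
  GET_RING_OBJ (cet->cached_frame, 0, f); /* oldest cached frame, not consumed */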
768 
769 static_always_inline vnet_crypto_async_frame_t *
770 cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
771  u32 * enqueue_thread_idx)
772 {
773   cryptodev_main_t *cmt = &cryptodev_main;
774   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
775   vnet_crypto_async_frame_t *frame, *frame_ret = 0;
776  u32 n_deq, n_success;
777  u32 n_cached_frame = rte_ring_count (cet->cached_frame), n_room_left;
778  u8 no_job_to_deq = 0;
779  u16 inflight = cet->inflight;
780 
781  n_room_left = CRYPTODEV_DEQ_CACHE_SZ - n_cached_frame - 1;
782 
783  if (n_cached_frame)
784  {
785  u32 i;
786  for (i = 0; i < n_cached_frame; i++)
787         {
788           vnet_crypto_async_frame_t *f;
789           void *f_ret;
790  u8 n_left, err, j;
791 
792  GET_RING_OBJ (cet->cached_frame, i, f);
793 
794  if (i < n_cached_frame - 2)
795  {
796  vnet_crypto_async_frame_t *f1, *f2;
797  GET_RING_OBJ (cet->cached_frame, i + 1, f1);
798               GET_RING_OBJ (cet->cached_frame, i + 2, f2);
799               CLIB_PREFETCH (f1, CLIB_CACHE_LINE_BYTES, LOAD);
800               CLIB_PREFETCH (f2, CLIB_CACHE_LINE_BYTES, LOAD);
801             }
802 
803  n_left = f->state & 0x7f;
804  err = f->state & 0x80;
805 
806  for (j = f->n_elts - n_left; j < f->n_elts && inflight; j++)
807  {
808  int ret =
809  rte_cryptodev_dp_sym_dequeue_single_job (cet->dp_service,
810  &f_ret);
811  if (ret < 0)
812  break;
813  f->elts[j].status = ret == 1 ? VNET_CRYPTO_OP_STATUS_COMPLETED :
814  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
815  err |= ret << 7;
816  inflight--;
817  }
818 
819  if (j == f->n_elts)
820  {
821  if (i == 0)
822  {
823                   frame_ret = f;
824                   f->state = err ? VNET_CRYPTO_FRAME_STATE_ELT_ERROR :
825                     VNET_CRYPTO_FRAME_STATE_SUCCESS;
826                 }
827  else
828  {
829  f->state = f->n_elts - j;
830  f->state |= err;
831  }
832  if (inflight)
833  continue;
834  }
835 
836           /* at this point f is not fully dequeued, and no more jobs
837            * can be dequeued
838            */
839  f->state = f->n_elts - j;
840  f->state |= err;
841  no_job_to_deq = 1;
842  break;
843  }
844 
845  if (frame_ret)
846  {
847  rte_ring_sc_dequeue (cet->cached_frame, (void **) &frame_ret);
848  n_room_left++;
849  }
850  }
851 
852   /* no point in dequeuing further */
853  if (!inflight || no_job_to_deq || !n_room_left)
854  goto end_deq;
855 
856   n_deq = rte_cryptodev_dp_sym_dequeue (cet->dp_service,
857                                         cryptodev_get_frame_n_elts,
858                                         cryptodev_post_dequeue,
859                                         (void **) &frame, 0, &n_success);
860  if (!n_deq)
861  goto end_deq;
862 
863  inflight -= n_deq;
864  no_job_to_deq = n_deq < frame->n_elts;
865  /* we have to cache the frame */
866  if (frame_ret || n_cached_frame || no_job_to_deq)
867  {
868  frame->state = frame->n_elts - n_deq;
869  frame->state |= ((n_success < n_deq) << 7);
870  rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
871  n_room_left--;
872  }
873  else
874  {
875       frame->state = n_success == frame->n_elts ?
876         VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
877       frame_ret = frame;
878  }
879 
880  /* see if we can dequeue more */
881  while (inflight && n_room_left && !no_job_to_deq)
882  {
883       n_deq = rte_cryptodev_dp_sym_dequeue (cet->dp_service,
884                                             cryptodev_get_frame_n_elts,
885                                             cryptodev_post_dequeue,
886                                             (void **) &frame, 0, &n_success);
887  if (!n_deq)
888  break;
889  inflight -= n_deq;
890  no_job_to_deq = n_deq < frame->n_elts;
891  frame->state = frame->n_elts - n_deq;
892  frame->state |= ((n_success < n_deq) << 7);
893  rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
894  n_room_left--;
895  }
896 
897 end_deq:
898  if (inflight < cet->inflight)
899  {
900  rte_cryptodev_dp_dequeue_done (cet->dp_service,
901  cet->inflight - inflight);
902  cet->inflight = inflight;
903  }
904 
905  if (frame_ret)
906  {
907  *nb_elts_processed = frame_ret->n_elts;
908  *enqueue_thread_idx = frame_ret->enqueue_thread_index;
909  }
910 
911  return frame_ret;
912 }
913 
914 /* *INDENT-OFF* */
915 static_always_inline int
916 cryptodev_enqueue_gcm_aad_8_enc (vlib_main_t * vm,
917                                  vnet_crypto_async_frame_t * frame)
918 {
919   return cryptodev_frame_gcm_enqueue (vm, frame,
920                                       CRYPTODEV_OP_TYPE_ENCRYPT, 8);
921 }
922 static_always_inline int
923 cryptodev_enqueue_gcm_aad_12_enc (vlib_main_t * vm,
924                                   vnet_crypto_async_frame_t * frame)
925 {
926   return cryptodev_frame_gcm_enqueue (vm, frame,
927                                       CRYPTODEV_OP_TYPE_ENCRYPT, 12);
928 }
929 
930 static_always_inline int
931 cryptodev_enqueue_gcm_aad_8_dec (vlib_main_t * vm,
932                                  vnet_crypto_async_frame_t * frame)
933 {
934   return cryptodev_frame_gcm_enqueue (vm, frame,
935                                       CRYPTODEV_OP_TYPE_DECRYPT, 8);
936 }
937 static_always_inline int
938 cryptodev_enqueue_gcm_aad_12_dec (vlib_main_t * vm,
939                                   vnet_crypto_async_frame_t * frame)
940 {
941   return cryptodev_frame_gcm_enqueue (vm, frame,
942                                       CRYPTODEV_OP_TYPE_DECRYPT, 12);
943 }
944 
945 static_always_inline int
946 cryptodev_enqueue_linked_alg_enc (vlib_main_t * vm,
947                                   vnet_crypto_async_frame_t * frame)
948 {
949   return cryptodev_frame_linked_algs_enqueue (vm, frame,
950                                               CRYPTODEV_OP_TYPE_ENCRYPT);
951 }
952 
953 static_always_inline int
954 cryptodev_enqueue_linked_alg_dec (vlib_main_t * vm,
955                                   vnet_crypto_async_frame_t * frame)
956 {
957   return cryptodev_frame_linked_algs_enqueue (vm, frame,
958                                               CRYPTODEV_OP_TYPE_DECRYPT);
959 }
960 
961 typedef enum
962 {
963   CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
964   CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
965 } cryptodev_resource_assign_op_t;
966 
967 /**
968  * assign a cryptodev resource to a worker.
969  * @param cet: the worker thread data
970  * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
971  * @param op: the assignment method.
972  * @return: 0 on success, a negative number otherwise.
973  **/
974 static_always_inline int
975 cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
976                            u32 cryptodev_inst_index,
977                            cryptodev_resource_assign_op_t op)
978 {
979   cryptodev_main_t *cmt = &cryptodev_main;
980  cryptodev_inst_t *cinst = 0;
981  uword idx;
982 
983  /* assign resource is only allowed when no inflight op is in the queue */
984  if (cet->inflight)
985  return -EBUSY;
986 
987  switch (op)
988     {
989     case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
990       if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
991           vec_len (cmt->cryptodev_inst))
992  return -1;
993 
994       clib_spinlock_lock (&cmt->tlock);
995       idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
996       clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
997  cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
998  cet->cryptodev_id = cinst->dev_id;
999  cet->cryptodev_q = cinst->q_id;
1000  cet->dp_service = (struct rte_crypto_dp_service_ctx *)
1001  cinst->dp_service_buffer;
1002  clib_spinlock_unlock (&cmt->tlock);
1003       break;
1004     case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
1005       /* assigning a used cryptodev resource is not allowed */
1006  if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
1007  == 1)
1008  return -EBUSY;
1009  vec_foreach_index (idx, cmt->cryptodev_inst)
1010  {
1011  cinst = cmt->cryptodev_inst + idx;
1012  if (cinst->dev_id == cet->cryptodev_id &&
1013  cinst->q_id == cet->cryptodev_q)
1014  break;
1015  }
1016  /* invalid existing worker resource assignment */
1017  if (idx == vec_len (cmt->cryptodev_inst))
1018  return -EINVAL;
1019       clib_spinlock_lock (&cmt->tlock);
1020       clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
1021       clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
1022                                 cryptodev_inst_index, 1);
1023  cinst = cmt->cryptodev_inst + cryptodev_inst_index;
1024  cet->cryptodev_id = cinst->dev_id;
1025  cet->cryptodev_q = cinst->q_id;
1026  cet->dp_service = (struct rte_crypto_dp_service_ctx *)
1027  cinst->dp_service_buffer;
1028  clib_spinlock_unlock (&cmt->tlock);
1029  break;
1030  default:
1031  return -EINVAL;
1032  }
1033  return 0;
1034 }
1035 
1036 static u8 *
1037 format_cryptodev_inst (u8 * s, va_list * args)
1038 {
1039   cryptodev_main_t *cmt = &cryptodev_main;
1040   u32 inst = va_arg (*args, u32);
1041  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
1042  u32 thread_index = 0;
1043  struct rte_cryptodev_info info;
1044 
1045  rte_cryptodev_info_get (cit->dev_id, &info);
1046  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);
1047 
1048  vec_foreach_index (thread_index, cmt->per_thread_data)
1049  {
1050  cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
1051  if (vlib_num_workers () > 0 && thread_index == 0)
1052  continue;
1053 
1054  if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
1055  {
1056  s = format (s, "%u (%v)\n", thread_index,
1057  vlib_worker_threads[thread_index].name);
1058  break;
1059  }
1060  }
1061 
1062  if (thread_index == vec_len (cmt->per_thread_data))
1063  s = format (s, "%s\n", "free");
1064 
1065  return s;
1066 }
1067 
1068 static clib_error_t *
1069 cryptodev_show_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
1070                               vlib_cli_command_t * cmd)
1071 {
1072   cryptodev_main_t *cmt = &cryptodev_main;
1073   u32 inst;
1074 
1075  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
1076  "Assigned-to");
1077  if (vec_len (cmt->cryptodev_inst) == 0)
1078  {
1079  vlib_cli_output (vm, "(nil)\n");
1080  return 0;
1081  }
1082 
1083  vec_foreach_index (inst, cmt->cryptodev_inst)
1084  vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
1085 
1086  return 0;
1087 }
1088 
1089 VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
1090  .path = "show cryptodev assignment",
1091  .short_help = "show cryptodev assignment",
1092  .function = cryptodev_show_assignment_fn,
1093 };
1094 
1095 static clib_error_t *
1096 cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
1097                              vlib_cli_command_t * cmd)
1098 {
1099   cryptodev_main_t *cmt = &cryptodev_main;
1100   cryptodev_engine_thread_t *cet;
1101   unformat_input_t _line_input, *line_input = &_line_input;
1102  u32 thread_index, inst_index;
1103  u32 thread_present = 0, inst_present = 0;
1104  clib_error_t *error = 0;
1105  int ret;
1106 
1107  /* Get a line of input. */
1108  if (!unformat_user (input, unformat_line_input, line_input))
1109  return 0;
1110 
1111  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
1112  {
1113  if (unformat (line_input, "thread %u", &thread_index))
1114  thread_present = 1;
1115  else if (unformat (line_input, "resource %u", &inst_index))
1116  inst_present = 1;
1117  else
1118  {
1119  error = clib_error_return (0, "unknown input `%U'",
1120  format_unformat_error, line_input);
1121  return error;
1122  }
1123  }
1124 
1125  if (!thread_present || !inst_present)
1126  {
1127  error = clib_error_return (0, "mandatory argument(s) missing");
1128  return error;
1129  }
1130 
1131  if (thread_index == 0 && vlib_num_workers () > 0)
1132  {
1133  error =
1134       clib_error_return (0, "cannot assign crypto resource to master thread");
1135  return error;
1136  }
1137 
1138  if (thread_index > vec_len (cmt->per_thread_data) ||
1139  inst_index > vec_len (cmt->cryptodev_inst))
1140  {
1141  error = clib_error_return (0, "wrong thread id or resource id");
1142  return error;
1143  }
1144 
1145  cet = cmt->per_thread_data + thread_index;
1146   ret = cryptodev_assign_resource (cet, inst_index,
1147                                    CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
1148   if (ret)
1149  {
1150  error = clib_error_return (0, "cryptodev_assign_resource returned %i",
1151  ret);
1152  return error;
1153  }
1154 
1155  return 0;
1156 }
1157 
1158 VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
1159  .path = "set cryptodev assignment",
1160  .short_help = "set cryptodev assignment thread <thread_index> "
1161  "resource <inst_index>",
1162  .function = cryptodev_set_assignment_fn,
1163 };
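Example CLI session (hypothetical thread and resource indices; the resource numbers come from "show cryptodev assignment"):

  vpp# set cryptodev assignment thread 1 resource 2
  vpp# show cryptodev assignment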
1164 
1165 static int
1166 check_cryptodev_alg_support (u32 dev_id)
1167 {
1168  const struct rte_cryptodev_symmetric_capability *cap;
1169  struct rte_cryptodev_sym_capability_idx cap_idx;
1170 
1171 #define _(a, b, c, d, e, f) \
1172  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b; \
1173  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c; \
1174  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
1175  if (!cap) \
1176  return -RTE_CRYPTO_##b##_##c; \
1177  else \
1178  { \
1179  if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
1180  return -RTE_CRYPTO_##b##_##c; \
1181  if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f) \
1182  return -RTE_CRYPTO_##b##_##c; \
1183  if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d) \
1184  return -RTE_CRYPTO_##b##_##c; \
1185  }
1186 
1187   foreach_vnet_aead_crypto_conversion
1188 #undef _
1189 
1190 #define _(a, b, c, d) \
1191  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; \
1192  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b; \
1193  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
1194  if (!cap) \
1195  return -RTE_CRYPTO_CIPHER_##b; \
1196  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; \
1197  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC; \
1198  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
1199  if (!cap) \
1200  return -RTE_CRYPTO_AUTH_##c;
1201 
1202   foreach_cryptodev_link_async_alg
1203 #undef _
1204  return 0;
1205 }
1206 
1207 static u32
1208 cryptodev_count_queue (u32 numa)
1209 {
1210  struct rte_cryptodev_info info;
1211  u32 n_cryptodev = rte_cryptodev_count ();
1212  u32 i, q_count = 0;
1213 
1214  for (i = 0; i < n_cryptodev; i++)
1215  {
1216  rte_cryptodev_info_get (i, &info);
1217  if (rte_cryptodev_socket_id (i) != numa)
1218  {
1219           clib_warning ("DPDK crypto resource %s is on a different numa node "
1220                         "than %u, ignored", info.device->name, numa);
1221  continue;
1222  }
1223  q_count += info.max_nb_queue_pairs;
1224  }
1225 
1226  return q_count;
1227 }
1228 
1229 static int
1230 cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
1231 {
1232   struct rte_cryptodev_info info;
1233   struct rte_cryptodev *cdev;
1234   cryptodev_main_t *cmt = &cryptodev_main;
1235   cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
1236                                                        vm->numa_node);
1237  u32 dp_size = 0;
1238  u32 i;
1239  int ret;
1240 
1241  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
1242  rte_cryptodev_info_get (cryptodev_id, &info);
1243 
1244  if (!(info.feature_flags & RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE))
1245  return -1;
1246 
1247  ret = check_cryptodev_alg_support (cryptodev_id);
1248  if (ret != 0)
1249  return ret;
1250 
1251 
1252 
1253  /** If the device is already started, we reuse it, otherwise configure
1254  * both the device and queue pair.
1255  **/
1256  if (!cdev->data->dev_started)
1257  {
1258  struct rte_cryptodev_config cfg;
1259 
1260  cfg.socket_id = vm->numa_node;
1261  cfg.nb_queue_pairs = info.max_nb_queue_pairs;
1262 
1263  rte_cryptodev_configure (cryptodev_id, &cfg);
1264 
1265  for (i = 0; i < info.max_nb_queue_pairs; i++)
1266  {
1267  struct rte_cryptodev_qp_conf qp_cfg;
1268 
1269  qp_cfg.mp_session = numa_data->sess_pool;
1270  qp_cfg.mp_session_private = numa_data->sess_priv_pool;
1271  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
1272 
1273  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
1274  vm->numa_node);
1275  if (ret)
1276  break;
1277  }
1278  if (i != info.max_nb_queue_pairs)
1279  return -1;
1280 
1281  /* start the device */
1282       rte_cryptodev_start (cryptodev_id);
1283  }
1284 
1285  ret = rte_cryptodev_get_dp_service_ctx_data_size (cryptodev_id);
1286  if (ret < 0)
1287  return -1;
1288  dp_size = ret;
1289 
1290  for (i = 0; i < info.max_nb_queue_pairs; i++)
1291  {
1292  cryptodev_inst_t *cdev_inst;
1293  vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
1294  cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
1295  cdev_inst->dev_id = cryptodev_id;
1296  cdev_inst->q_id = i;
1297  vec_validate_aligned(cdev_inst->dp_service_buffer, dp_size, 8);
1298  snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
1299  "%s_q%u", info.device->name, i);
1300  }
1301 
1302  return 0;
1303 }
1304 
1305 static int
1306 cryptodev_cmp (void *v1, void *v2)
1307 {
1308  cryptodev_inst_t *a1 = v1;
1309  cryptodev_inst_t *a2 = v2;
1310 
1311  if (a1->q_id > a2->q_id)
1312  return 1;
1313  if (a1->q_id < a2->q_id)
1314  return -1;
1315  return 0;
1316 }
1317 
1318 static int
1319 cryptodev_probe (vlib_main_t *vm, u32 n_workers)
1320 {
1321   cryptodev_main_t *cmt = &cryptodev_main;
1322   u32 n_queues = cryptodev_count_queue (vm->numa_node);
1323  u32 i;
1324  int ret;
1325 
1326  if (n_queues < n_workers)
1327  return -1;
1328 
1329  for (i = 0; i < rte_cryptodev_count (); i++)
1330  {
1331  ret = cryptodev_configure (vm, i);
1332  if (ret)
1333  continue;
1334  }
1335 
1337 
1338   /* if there are not enough devices, stop using cryptodev */
1339  if (vec_len (cmt->cryptodev_inst) < n_workers)
1340  return -1;
1341 
1342  return 0;
1343 }
1344 
1345 static int
1346 cryptodev_get_session_sz (vlib_main_t *vm, u32 n_workers)
1347 {
1348  u32 sess_data_sz = 0, i;
1349 
1350  if (rte_cryptodev_count () == 0)
1351  return -1;
1352 
1353  for (i = 0; i < rte_cryptodev_count (); i++)
1354  {
1355  u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);
1356 
1357  sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
1358  }
1359 
1360  return sess_data_sz;
1361 }
1362 
1363 static void
1364 dpdk_disable_cryptodev_engine (vlib_main_t * vm)
1365 {
1366   cryptodev_main_t *cmt = &cryptodev_main;
1367   cryptodev_numa_data_t *numa_data;
1368   cryptodev_engine_thread_t *ptd;
1369 
1370  vec_validate (cmt->per_numa_data, vm->numa_node);
1371  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
1372 
1373  if (numa_data->sess_pool)
1374  rte_mempool_free (numa_data->sess_pool);
1375  if (numa_data->sess_priv_pool)
1376  rte_mempool_free (numa_data->sess_priv_pool);
1377 
1378  vec_foreach (ptd, cmt->per_thread_data)
1379  {
1380  if (ptd->aad_buf)
1381  rte_free (ptd->aad_buf);
1382  if (ptd->cached_frame)
1383  rte_ring_free (ptd->cached_frame);
1384  }
1385 }
1386 
1387 clib_error_t *
1388 dpdk_cryptodev_init (vlib_main_t * vm)
1389 {
1390   cryptodev_main_t *cmt = &cryptodev_main;
1391   vlib_thread_main_t *tm = vlib_get_thread_main ();
1392   cryptodev_engine_thread_t *ptd;
1393   cryptodev_numa_data_t *numa_data;
1394  struct rte_mempool *mp;
1395  u32 skip_master = vlib_num_workers () > 0;
1396  u32 n_workers = tm->n_vlib_mains - skip_master;
1397  u32 numa = vm->numa_node;
1398  i32 sess_sz;
1399  u32 eidx;
1400  u32 i;
1401   u8 *name = 0;
1402   clib_error_t *error;
1403 
1404  cmt->iova_mode = rte_eal_iova_mode ();
1405 
1406  sess_sz = cryptodev_get_session_sz(vm, n_workers);
1407  if (sess_sz < 0)
1408  {
1409  error = clib_error_return (0, "Not enough cryptodevs");
1410  return error;
1411  }
1412 
1413  vec_validate (cmt->per_numa_data, vm->numa_node);
1414  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
1415 
1416  /* create session pool for the numa node */
1417  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
1418   mp = rte_cryptodev_sym_session_pool_create ((char *) name,
1419                                               CRYPTODEV_NB_SESSION,
1420                                               0, 0, 0, numa);
1421  if (!mp)
1422  {
1423  error = clib_error_return (0, "Not enough memory for mp %s", name);
1424  goto err_handling;
1425  }
1426  vec_free (name);
1427 
1428  numa_data->sess_pool = mp;
1429 
1430  /* create session private pool for the numa node */
1431  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
1432  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
1433  0, NULL, NULL, NULL, NULL, numa, 0);
1434  if (!mp)
1435  {
1436  error = clib_error_return (0, "Not enough memory for mp %s", name);
1437  vec_free (name);
1438  goto err_handling;
1439  }
1440 
1441  vec_free (name);
1442 
1443  numa_data->sess_priv_pool = mp;
1444 
1445  /* probe all cryptodev devices and get queue info */
1446  if (cryptodev_probe (vm, n_workers) < 0)
1447  {
1448  error = clib_error_return (0, "Failed to configure cryptodev");
1449  goto err_handling;
1450  }
1451 
1452   clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
1453   clib_spinlock_init (&cmt->tlock);
1454 
1455   vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
1456                         CLIB_CACHE_LINE_BYTES);
1457  for (i = skip_master; i < tm->n_vlib_mains; i++)
1458  {
1459       ptd = cmt->per_thread_data + i;
1460       cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
1461       ptd->aad_buf = rte_zmalloc_socket (0, CRYPTODEV_NB_CRYPTO_OPS *
1462                                          CRYPTODEV_MAX_AAD_SIZE,
1463                                          CLIB_CACHE_LINE_BYTES,
1464                                          numa);
1465  if (ptd->aad_buf == 0)
1466  {
1467  error = clib_error_return (0, "Failed to alloc aad buf");
1468  goto err_handling;
1469  }
1470 
1471  ptd->aad_phy_addr = rte_malloc_virt2iova (ptd->aad_buf);
1472 
1473  name = format (0, "cache_frame_ring_%u%u", numa, i);
1474  ptd->cached_frame = rte_ring_create ((char *)name,
1475  CRYPTODEV_DEQ_CACHE_SZ, numa,
1476  RING_F_SC_DEQ | RING_F_SP_ENQ);
1477 
1478  if (ptd->cached_frame == 0)
1479  {
1480           error = clib_error_return (0, "Failed to create frame ring");
1481  goto err_handling;
1482  }
1483  vec_free (name);
1484  }
1485 
1486  /* register handler */
1487  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
1488  "DPDK Cryptodev Engine");
1489 
1490 #define _(a, b, c, d, e, f) \
1491  vnet_crypto_register_async_handler \
1492  (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \
1493  cryptodev_enqueue_gcm_aad_##f##_enc,\
1494  cryptodev_frame_dequeue); \
1495  vnet_crypto_register_async_handler \
1496  (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \
1497  cryptodev_enqueue_gcm_aad_##f##_dec, \
1498  cryptodev_frame_dequeue);
1499 
1500   foreach_vnet_aead_crypto_conversion
1501 #undef _
1502 
1503 #define _(a, b, c, d) \
1504  vnet_crypto_register_async_handler \
1505  (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC, \
1506  cryptodev_enqueue_linked_alg_enc, \
1507  cryptodev_frame_dequeue); \
1508  vnet_crypto_register_async_handler \
1509  (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC, \
1510  cryptodev_enqueue_linked_alg_dec, \
1511  cryptodev_frame_dequeue);
1512 
1513   foreach_cryptodev_link_async_alg
1514 #undef _
1515 
1516   vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);
1517 
1518  return 0;
1519 
1520 err_handling:
1521   dpdk_disable_cryptodev_engine (vm);
1522 
1523  return error;
1524 }
1525 /* *INDENT-ON* */
1526 
1527 /*
1528  * fd.io coding-style-patch-verification: ON
1529  *
1530  * Local Variables:
1531  * eval: (c-set-style "gnu")
1532  * End:
1533  */