FD.io VPP  v21.01.1
Vector Packet Processing
cryptodev.c
Go to the documentation of this file.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2020 Intel and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vnet/plugin/plugin.h>
20 #include <vnet/crypto/crypto.h>
21 #include <vnet/vnet.h>
22 #include <vpp/app/version.h>
23 
24 #include <dpdk/buffer.h>
25 #include <dpdk/device/dpdk.h>
26 #include <dpdk/device/dpdk_priv.h>
27 #undef always_inline
28 #include <rte_bus_vdev.h>
29 #include <rte_cryptodev.h>
30 #include <rte_crypto_sym.h>
31 #include <rte_crypto.h>
32 #include <rte_cryptodev_pmd.h>
33 #include <rte_config.h>
34 
35 #if CLIB_DEBUG > 0
36 #define always_inline static inline
37 #else
38 #define always_inline static inline __attribute__ ((__always_inline__))
39 #endif
40 
41 #define CRYPTODEV_NB_CRYPTO_OPS 1024
42 #define CRYPTODEV_NB_SESSION 10240
43 #define CRYPTODEV_DEF_DRIVE crypto_aesni_mb
44 
45 #define CRYPTODEV_IV_OFFSET (offsetof (cryptodev_op_t, iv))
46 #define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))
47 
48 /* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */
49 #define foreach_vnet_aead_crypto_conversion \
50  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8) \
51  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
52  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8) \
53  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
54  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8) \
55  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)
56 
57 /**
58  * crypto (alg, cryptodev_alg), hash (alg, digest-size)
59  **/
60 #define foreach_cryptodev_link_async_alg \
61  _ (AES_128_CBC, AES_CBC, SHA1, 12) \
62  _ (AES_192_CBC, AES_CBC, SHA1, 12) \
63  _ (AES_256_CBC, AES_CBC, SHA1, 12) \
64  _ (AES_128_CBC, AES_CBC, SHA224, 14) \
65  _ (AES_192_CBC, AES_CBC, SHA224, 14) \
66  _ (AES_256_CBC, AES_CBC, SHA224, 14) \
67  _ (AES_128_CBC, AES_CBC, SHA256, 16) \
68  _ (AES_192_CBC, AES_CBC, SHA256, 16) \
69  _ (AES_256_CBC, AES_CBC, SHA256, 16) \
70  _ (AES_128_CBC, AES_CBC, SHA384, 24) \
71  _ (AES_192_CBC, AES_CBC, SHA384, 24) \
72  _ (AES_256_CBC, AES_CBC, SHA384, 24) \
73  _ (AES_128_CBC, AES_CBC, SHA512, 32) \
74  _ (AES_192_CBC, AES_CBC, SHA512, 32) \
75  _ (AES_256_CBC, AES_CBC, SHA512, 32)
76 
77 #define foreach_vnet_crypto_status_conversion \
78  _(SUCCESS, COMPLETED) \
79  _(NOT_PROCESSED, WORK_IN_PROGRESS) \
80  _(AUTH_FAILED, FAIL_BAD_HMAC) \
81  _(INVALID_SESSION, FAIL_ENGINE_ERR) \
82  _(INVALID_ARGS, FAIL_ENGINE_ERR) \
83  _(ERROR, FAIL_ENGINE_ERR)
84 
86 #define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
88 #undef _
89 };
90 
91 typedef struct
92 {
93  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
94  struct rte_crypto_op op;
95  struct rte_crypto_sym_op sop;
96  u8 iv[16];
97  u8 aad[16];
101 
102 typedef enum
103 {
108 
109 typedef struct
110 {
111  struct rte_cryptodev_sym_session *keys[CRYPTODEV_N_OP_TYPES];
113 
114 typedef struct
115 {
118  char *desc;
120 
121 typedef struct
122 {
123  struct rte_mempool *cop_pool;
124  struct rte_mempool *sess_pool;
125  struct rte_mempool *sess_priv_pool;
127 
128 typedef struct
129 {
130  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
135  struct rte_ring *ring;
137 
138 typedef struct
139 {
143  enum rte_iova_mode iova_mode;
148 
150 
151 static int
152 prepare_aead_xform (struct rte_crypto_sym_xform *xform,
153  cryptodev_op_type_t op_type,
154  const vnet_crypto_key_t * key, u32 aad_len)
155 {
156  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
157  memset (xform, 0, sizeof (*xform));
158  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
159  xform->next = 0;
160 
161  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
162  key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
163  key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
164  return -1;
165 
166  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
167  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
168  RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
169  aead_xform->aad_length = aad_len;
170  aead_xform->digest_length = 16;
171  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
172  aead_xform->iv.length = 12;
173  aead_xform->key.data = key->data;
174  aead_xform->key.length = vec_len (key->data);
175 
176  return 0;
177 }
178 
/*
 * Build a two-element cipher+auth transform chain for a linked
 * (AES-CBC + HMAC) vnet crypto key.  For encrypt the cipher transform
 * runs first (then GENERATE auth); for decrypt the auth transform runs
 * first (VERIFY, then decrypt).
 * Returns 0 on success, -1 on unknown async alg or bad key indices.
 */
179 static int
180 prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
181  cryptodev_op_type_t op_type,
182  const vnet_crypto_key_t * key)
183 {
184  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
185  vnet_crypto_key_t *key_cipher, *key_auth;
186  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
187  enum rte_crypto_auth_algorithm auth_algo = ~0;
188  u32 digest_len = ~0;
189 
 /* a linked key references two underlying keys: cipher and integrity */
190  key_cipher = vnet_crypto_get_key (key->index_crypto);
191  key_auth = vnet_crypto_get_key (key->index_integ);
192  if (!key_cipher || !key_auth)
193  return -1;
194 
195  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
196  {
197  xform_cipher = xforms;
198  xform_auth = xforms + 1;
199  xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
200  xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
201  }
202  else
203  {
204  xform_cipher = xforms + 1;
205  xform_auth = xforms;
206  xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
207  xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
208  }
209 
210  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
211  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
212  xforms->next = xforms + 1;
213 
 /* map the vnet linked alg onto DPDK cipher/auth algo + digest size */
214  switch (key->async_alg)
215  {
216 #define _(a, b, c, d) \
217  case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:\
218  cipher_algo = RTE_CRYPTO_CIPHER_##b; \
219  auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
220  digest_len = d; \
221  break;
222 
224 #undef _
225  default:
226  return -1;
227  }
228 
229  xform_cipher->cipher.algo = cipher_algo;
230  xform_cipher->cipher.key.data = key_cipher->data;
231  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
 /* AES-CBC IV is 16 bytes; IV is stored inline in the op at a fixed offset */
232  xform_cipher->cipher.iv.length = 16;
233  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;
234 
235  xform_auth->auth.algo = auth_algo;
236  xform_auth->auth.digest_length = digest_len;
237  xform_auth->auth.key.data = key_auth->data;
238  xform_auth->auth.key.length = vec_len (key_auth->data);
239 
240  return 0;
241 }
242 
/*
 * Create the encrypt/decrypt cryptodev session pair for a vnet key and
 * initialize the session private data on every configured cryptodev
 * instance.  The AAD length the sessions were built for is cached in
 * opaque_data so the enqueue path can detect a mismatch.
 */
243 static int
245  struct rte_mempool *sess_priv_pool,
246  cryptodev_key_t * session_pair, u32 aad_len)
247 {
248  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
249  struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
251  cryptodev_inst_t *dev_inst;
252  struct rte_cryptodev *cdev;
253  int ret;
254  uint8_t dev_id = 0;
255 
256  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
257  ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
258  else
259  ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
260  aad_len);
 /* NOTE(review): returning 0 (success) when xform preparation failed
  * looks wrong -- should presumably propagate ret (non-zero) so the
  * caller tears the half-built session pair down.  Verify upstream. */
261  if (ret)
262  return 0;
263 
264  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
266  else
267  prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);
268 
269  vec_foreach (dev_inst, cmt->cryptodev_inst)
270  {
271  dev_id = dev_inst->dev_id;
272  cdev = rte_cryptodev_pmd_get_dev (dev_id);
273 
274  /* if the session is already configured for the driver type, avoid
275  configuring it again to increase the session data's refcnt */
276  if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
277  session_pair->keys[1]->sess_data[cdev->driver_id].data)
278  continue;
279 
 /* NOTE(review): ret from the first session_init is overwritten by the
  * second before it is checked -- an encrypt-side init failure is
  * silently ignored.  Confirm and fix upstream. */
280  ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
281  xforms_enc, sess_priv_pool);
282  ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
283  xforms_dec, sess_priv_pool);
284  if (ret < 0)
285  return ret;
286  }
 /* remember which AAD length these sessions were created with */
287  session_pair->keys[0]->opaque_data = aad_len;
288  session_pair->keys[1]->opaque_data = aad_len;
289 
290  return 0;
291 }
292 
293 static void
294 cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
295 {
296  u32 n_devs, i;
297 
298  if (sess == NULL)
299  return;
300 
301  n_devs = rte_cryptodev_count ();
302 
303  for (i = 0; i < n_devs; i++)
304  rte_cryptodev_sym_session_clear (i, sess);
305 
306  rte_cryptodev_sym_session_free (sess);
307 }
308 
/*
 * Return 0 if the engine can handle the key's algorithm, -1 otherwise.
 * Linked (cipher+auth) keys are always accepted here; AEAD keys are
 * accepted only if their alg appears in the supported-GCM table.
 */
309 static int
311 {
312  vnet_crypto_alg_t alg;
313  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
314  return 0;
315 
316  alg = key->alg;
317 
 /* expand to one "if (alg == ...) return 0;" per supported AEAD alg */
318 #define _(a, b, c, d, e, f) \
319  if (alg == VNET_CRYPTO_ALG_##a) \
320  return 0;
321 
323 #undef _
324  return -1;
325 }
326 
/*
 * Central key add/modify/delete handler.  DEL frees the session pair and
 * returns the key slot to the pool; MODIFY frees the old sessions and
 * falls through to re-create them; ADD allocates a fresh zeroed slot.
 * On any creation failure the partially built pair is torn down.
 */
329  vnet_crypto_key_index_t idx, u32 aad_len)
330 {
332  cryptodev_numa_data_t *numa_data;
334  struct rte_mempool *sess_pool, *sess_priv_pool;
335  cryptodev_key_t *ckey = 0;
336  int ret = 0;
337 
338  if (kop == VNET_CRYPTO_KEY_OP_DEL)
339  {
340  if (idx >= vec_len (cmt->keys))
341  return;
342 
343  ckey = pool_elt_at_index (cmt->keys, idx);
344  cryptodev_session_del (ckey->keys[0]);
345  cryptodev_session_del (ckey->keys[1]);
346  ckey->keys[0] = 0;
347  ckey->keys[1] = 0;
348  pool_put (cmt->keys, ckey);
349  return;
350  }
351  else if (kop == VNET_CRYPTO_KEY_OP_MODIFY)
352  {
353  if (idx >= vec_len (cmt->keys))
354  return;
355 
356  ckey = pool_elt_at_index (cmt->keys, idx);
357 
 /* drop the old sessions; new ones are created below */
358  cryptodev_session_del (ckey->keys[0]);
359  cryptodev_session_del (ckey->keys[1]);
360  ckey->keys[0] = 0;
361  ckey->keys[1] = 0;
362  }
363  else /* create key */
364  pool_get_zero (cmt->keys, ckey);
365 
366  /* do not create session for unsupported alg */
368  return;
369 
 /* session objects come from this thread's numa-local mempools */
370  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
371  sess_pool = numa_data->sess_pool;
372  sess_priv_pool = numa_data->sess_priv_pool;
373 
 /* keys[0] = encrypt session, keys[1] = decrypt session */
374  ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
375  if (!ckey->keys[0])
376  {
377  ret = -1;
378  goto clear_key;
379  }
380 
381  ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
382  if (!ckey->keys[1])
383  {
384  ret = -1;
385  goto clear_key;
386  }
387 
388  ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);
389 
390 clear_key:
391  if (ret != 0)
392  {
 /* roll back: free whatever was created and release the slot */
393  cryptodev_session_del (ckey->keys[0]);
394  cryptodev_session_del (ckey->keys[1]);
395  memset (ckey, 0, sizeof (*ckey));
396  pool_put (cmt->keys, ckey);
397  }
398 }
399 
/* Key-op callback registered with the vnet crypto framework; builds
 * sessions with a default AAD length of 8 bytes (the GCM enqueue path
 * recreates the session on an AAD-length mismatch). */
400 /*static*/ void
403 {
404  cryptodev_sess_handler (vm, kop, idx, 8);
405 }
406 
/* Mark every element of async frame f with status s (used to fail the
 * whole frame when enqueue cannot proceed). */
410 {
411  u32 n_elts = f->n_elts, i;
412 
413  for (i = 0; i < n_elts; i++)
414  f->elts[i].status = s;
415 }
416 
417 static_always_inline rte_iova_t
418 cryptodev_get_iova (clib_pmalloc_main_t * pm, enum rte_iova_mode mode,
419  void *data)
420 {
421  u64 index;
422  if (mode == RTE_IOVA_VA)
423  return (rte_iova_t) pointer_to_uword (data);
424 
425  index = clib_pmalloc_get_page_index (pm, data);
426  return pointer_to_uword (data) - pm->lookup_table[index];
427 }
428 
/*
 * Re-link and re-measure the mbuf chain that shadows a chained
 * vlib_buffer so the cryptodev PMD sees consistent segment metadata.
 */
431  vlib_buffer_t * b)
432 {
433  struct rte_mbuf *first_mb = mb, *last_mb = mb; /**< last mbuf */
434  /* when input node is not dpdk, mbuf data len is not initialized, for
435  * single buffer it is not a problem since the data length is written
436  * into cryptodev operation. For chained buffer a reference data length
437  * has to be computed through vlib_buffer.
438  *
439  * even when input node is dpdk, it is possible chained vlib_buffers
440  * are updated (either added or removed a buffer) but not not mbuf fields.
441  * we have to re-link every mbuf in the chain.
442  */
 /* head data length = vlib current_length plus any gap between the mbuf
  * data pointer and the vlib buffer's current data pointer */
443  u16 data_len = b->current_length + (b->data + b->current_data -
444  rte_pktmbuf_mtod (mb, u8 *));
445 
446  first_mb->nb_segs = 1;
447  first_mb->pkt_len = first_mb->data_len = data_len;
448 
449  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
450  {
451  b = vlib_get_buffer (vm, b->next_buffer);
452  mb = rte_mbuf_from_vlib_buffer (b);
 /* buffers not owned by dpdk-input carry an uninitialized mbuf header */
453  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
454  rte_pktmbuf_reset (mb);
455  last_mb->next = mb;
456  last_mb = mb;
457  mb->data_len = b->current_length;
458  mb->pkt_len = b->current_length;
459  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
460  first_mb->nb_segs++;
461  if (PREDICT_FALSE (b->ref_count > 1))
462  mb->pool =
464  }
465 }
466 
/*
 * Enqueue an async frame of linked (cipher+HMAC) operations to this
 * worker's cryptodev queue.  Fails the whole frame (engine error) when
 * the inflight budget or the op mempool is exhausted.  One crypto op is
 * built per frame element; the first op carries the frame pointer and
 * element count so the dequeue side can reassemble the frame.
 */
470  cryptodev_op_type_t op_type)
471 {
474  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
477  cryptodev_op_t **cop;
478  u32 *bi;
479  u32 n_enqueue, n_elts;
481  u32 last_key_index;
482 
483  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
484  return -1;
485  n_elts = frame->n_elts;
486 
 /* not enough inflight room: fail every element and bail out */
487  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
488  {
490  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
491  return -1;
492  }
493 
494  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
495  (void **) cet->cops, n_elts) < 0))
496  {
498  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
499  return -1;
500  }
501 
502  cop = cet->cops;
503  fe = frame->elts;
504  bi = frame->buffer_indices;
 /* first op of the burst remembers the frame for the dequeue side */
505  cop[0]->frame = frame;
506  cop[0]->n_elts = n_elts;
507 
508  key = pool_elt_at_index (cmt->keys, fe->key_index);
509  last_key_index = fe->key_index;
510 
511  while (n_elts)
512  {
513  vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
514  struct rte_crypto_sym_op *sop = &cop[0]->sop;
515  i16 crypto_offset = fe->crypto_start_offset;
516  i16 integ_offset = fe->integ_start_offset;
517  u32 offset_diff = crypto_offset - integ_offset;
518 
519  if (n_elts > 2)
520  {
521  CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE);
522  CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE);
523  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
524  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
525  }
 /* only re-fetch the key slot when the key index changes */
526  if (last_key_index != fe->key_index)
527  {
528  key = pool_elt_at_index (cmt->keys, fe->key_index);
529  last_key_index = fe->key_index;
530  }
531 
532  sop->m_src = rte_mbuf_from_vlib_buffer (b);
533  sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;
534  sop->m_dst = 0;
535  /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
536  * so we have to manually adjust mbuf data_off here so cryptodev can
537  * correctly compute the data pointer. The prepend here will be later
538  * rewritten by tx. */
539  if (PREDICT_TRUE (fe->integ_start_offset < 0))
540  {
541  sop->m_src->data_off += fe->integ_start_offset;
542  integ_offset = 0;
543  crypto_offset = offset_diff;
544  }
545  sop->session = key->keys[op_type];
546  sop->cipher.data.offset = crypto_offset;
547  sop->cipher.data.length = fe->crypto_total_length;
548  sop->auth.data.offset = integ_offset;
549  sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
550  sop->auth.digest.data = fe->digest;
551  sop->auth.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode,
552  fe->digest);
554  cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
555  else
556  /* for input nodes that are not dpdk-input, it is possible the mbuf
557  * was updated before as one of the chained mbufs. Setting nb_segs
558  * to 1 here to prevent the cryptodev PMD to access potentially
559  * invalid m_src->next pointers.
560  */
561  sop->m_src->nb_segs = 1;
 /* copy the 16-byte CBC IV inline into the op */
562  clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
563  cop++;
564  bi++;
565  fe++;
566  n_elts--;
567  }
568 
569  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
570  cet->cryptodev_q,
571  (struct rte_crypto_op **)
572  cet->cops, frame->n_elts);
573  ASSERT (n_enqueue == frame->n_elts);
574  cet->inflight += n_enqueue;
575 
576  return 0;
577 }
578 
/*
 * Enqueue an async frame of AES-GCM (AEAD) operations to this worker's
 * cryptodev queue.  If the per-key session was built for a different
 * AAD length it is rebuilt via the session handler before use.  Same
 * frame/op bookkeeping as the linked-alg enqueue path.
 */
582  cryptodev_op_type_t op_type, u8 aad_len)
583 {
586  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
589  cryptodev_op_t **cop;
590  u32 *bi;
591  u32 n_enqueue = 0, n_elts;
593  u32 last_key_index;
594  u8 sess_aad_len;
595 
596  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
597  return -1;
598  n_elts = frame->n_elts;
599 
 /* not enough inflight room: fail every element and bail out */
600  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
601  {
603  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
604  return -1;
605  }
606 
607  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
608  (void **) cet->cops, n_elts) < 0))
609  {
611  VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
612  return -1;
613  }
614 
615  cop = cet->cops;
616  fe = frame->elts;
617  bi = frame->buffer_indices;
 /* first op of the burst remembers the frame for the dequeue side */
618  cop[0]->frame = frame;
619  cop[0]->n_elts = n_elts;
620 
621  key = pool_elt_at_index (cmt->keys, fe->key_index);
622  last_key_index = fe->key_index;
 /* sessions cache the AAD length they were built for in opaque_data;
  * rebuild the session if this frame uses a different AAD length */
623  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
624  if (PREDICT_FALSE (sess_aad_len != aad_len))
626  fe->key_index, aad_len);
627 
628  while (n_elts)
629  {
630  vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
631  struct rte_crypto_sym_op *sop = &cop[0]->sop;
632  u16 crypto_offset = fe->crypto_start_offset;
633 
634  if (n_elts > 2)
635  {
636  CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE);
637  CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE);
638  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
639  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
640  }
641  if (last_key_index != fe->key_index)
642  {
643  key = pool_elt_at_index (cmt->keys, fe->key_index);
644  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
645  if (PREDICT_FALSE (sess_aad_len != aad_len))
646  {
648  fe->key_index, aad_len);
649  }
650  last_key_index = fe->key_index;
651  }
652 
653  sop->m_src = rte_mbuf_from_vlib_buffer (b);
654  sop->m_dst = 0;
655  /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
656  * so we have to manually adjust mbuf data_off here so cryptodev can
657  * correctly compute the data pointer. The prepend here will be later
658  * rewritten by tx. */
659  if (PREDICT_FALSE (fe->crypto_start_offset < 0))
660  {
661  rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset);
662  crypto_offset = 0;
663  }
664 
665  sop->session = key->keys[op_type];
 /* AAD lives inline in the op; phys addr derived from the op's IOVA */
666  sop->aead.aad.data = cop[0]->aad;
667  sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
668  sop->aead.data.length = fe->crypto_total_length;
669  sop->aead.data.offset = crypto_offset;
670  sop->aead.digest.data = fe->tag;
671  sop->aead.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode,
672  fe->tag);
674  cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
675  else
676  /* for input nodes that are not dpdk-input, it is possible the mbuf
677  * was updated before as one of the chained mbufs. Setting nb_segs
678  * to 1 here to prevent the cryptodev PMD to access potentially
679  * invalid m_src->next pointers.
680  */
681  sop->m_src->nb_segs = 1;
 /* 96-bit GCM IV plus the frame element's AAD, copied inline */
682  clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
683  clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
684  cop++;
685  bi++;
686  fe++;
687  n_elts--;
688  }
689 
690  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
691  cet->cryptodev_q,
692  (struct rte_crypto_op **)
693  cet->cops, frame->n_elts);
694  ASSERT (n_enqueue == frame->n_elts);
695  cet->inflight += n_enqueue;
696 
697  return 0;
698 }
699 
/* Peek (without dequeuing) at the op at the ring's consumer head.
 * Relies on rte_ring storing its object table immediately after the
 * ring header -- NOTE(review): layout-dependent; verify against the
 * DPDK version in use. */
701 cryptodev_get_ring_head (struct rte_ring * ring)
702 {
703  cryptodev_op_t **r = (void *) &ring[1];
704  return r[ring->cons.head & ring->mask];
705 }
706 
/*
 * Drain completed ops from the cryptodev queue into the per-thread ring
 * and, once every op of the frame at the ring head has completed,
 * dequeue that frame: translate per-op DPDK statuses into vnet crypto
 * statuses, set the frame state, recycle the ops and return the frame.
 * Returns 0 when no complete frame is available yet.
 */
708 cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
709  u32 * enqueue_thread_idx)
710 {
712  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
714  cryptodev_op_t *cop0, **cop = cet->cops;
717  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
718  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* sum of status */
719 
720  if (cet->inflight)
721  {
722  n_elts = clib_min (CRYPTODEV_NB_CRYPTO_OPS - n_completed_ops,
724  n_elts = rte_cryptodev_dequeue_burst
725  (cet->cryptodev_id, cet->cryptodev_q,
726  (struct rte_crypto_op **) cet->cops, n_elts);
727  cet->inflight -= n_elts;
728  n_completed_ops += n_elts;
729 
 /* stash completed ops in the ring until a whole frame is done */
730  rte_ring_sp_enqueue_burst (cet->ring, (void *) cet->cops, n_elts, NULL);
731  }
732 
733  if (PREDICT_FALSE (n_completed_ops == 0))
734  return 0;
735 
736  cop0 = cryptodev_get_ring_head (cet->ring);
737  /* not a single frame is finished */
738  if (PREDICT_FALSE (cop0->n_elts > rte_ring_count (cet->ring)))
739  return 0;
740 
741  frame = cop0->frame;
742  n_elts = cop0->n_elts;
743  n_elts = rte_ring_sc_dequeue_bulk (cet->ring, (void **) cet->cops,
744  n_elts, 0);
745  fe = frame->elts;
746 
 /* 4-wide unrolled status translation; ssN accumulate to detect failure */
747  while (n_elts > 4)
748  {
749  ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
750  ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
751  ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
752  ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];
753 
754  cop += 4;
755  fe += 4;
756  n_elts -= 4;
757  }
758 
759  while (n_elts)
760  {
761  ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
762  fe++;
763  cop++;
764  n_elts--;
765  }
766 
 /* frame succeeds only if every element's status OR-ed to COMPLETED */
767  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
769 
770  rte_mempool_put_bulk (numa->cop_pool, (void **) cet->cops, frame->n_elts);
771  *nb_elts_processed = frame->n_elts;
772  *enqueue_thread_idx = frame->enqueue_thread_index;
773  return frame;
774 }
775 
776 /* *INDENT-OFF* */
/* Thin per-algorithm wrappers registered as the engine's async enqueue
 * handlers: four GCM variants (encrypt/decrypt x AAD length) delegating
 * to cryptodev_frame_gcm_enqueue, and two linked-alg variants
 * (encrypt/decrypt) delegating to cryptodev_frame_linked_algs_enqueue. */
780 {
781  return cryptodev_frame_gcm_enqueue (vm, frame,
783 }
787 {
788  return cryptodev_frame_gcm_enqueue (vm, frame,
790 }
791 
795 {
796  return cryptodev_frame_gcm_enqueue (vm, frame,
798 }
802 {
803  return cryptodev_frame_gcm_enqueue (vm, frame,
805 }
806 
810 {
811  return cryptodev_frame_linked_algs_enqueue (vm, frame,
813 }
814 
818 {
819  return cryptodev_frame_linked_algs_enqueue (vm, frame,
821 }
822 
823 typedef enum
824 {
828 
829 /**
830  * assign a cryptodev resource to a worker.
831  * @param cet: the worker thread data
832  * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
833  * @param op: the assignment method.
834  * @return: 0 if successfully, negative number otherwise.
835  **/
838  u32 cryptodev_inst_index,
839  cryptodev_resource_assign_op_t op)
840 {
842  cryptodev_inst_t *cinst = 0;
843  uword idx;
844 
845  /* assign resource is only allowed when no inflight op is in the queue */
846  if (cet->inflight)
847  return -EBUSY;
848 
849  switch (op)
850  {
 /* AUTO: grab the first free instance from the bitmap under the lock */
853  vec_len (cmt->cryptodev_inst))
854  return -1;
855 
856  clib_spinlock_lock (&cmt->tlock);
858  clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
859  cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
860  cet->cryptodev_id = cinst->dev_id;
861  cet->cryptodev_q = cinst->q_id;
862  clib_spinlock_unlock (&cmt->tlock);
863  break;
 /* UPDATE: move this worker to an explicitly chosen instance */
865  /* assigning a used cryptodev resource is not allowed */
866  if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
867  == 1)
868  return -EBUSY;
 /* locate the worker's current instance so it can be released */
870  {
871  cinst = cmt->cryptodev_inst + idx;
872  if (cinst->dev_id == cet->cryptodev_id &&
873  cinst->q_id == cet->cryptodev_q)
874  break;
875  }
876  /* invalid existing worker resource assignment */
877  if (idx == vec_len (cmt->cryptodev_inst))
878  return -EINVAL;
879  clib_spinlock_lock (&cmt->tlock);
882  cryptodev_inst_index, 1);
883  cinst = cmt->cryptodev_inst + cryptodev_inst_index;
884  cet->cryptodev_id = cinst->dev_id;
885  cet->cryptodev_q = cinst->q_id;
886  clib_spinlock_unlock (&cmt->tlock);
887  break;
888  default:
889  return -EINVAL;
890  }
891  return 0;
892 }
893 
/*
 * format() callback: print one cryptodev instance as
 * "<device-name> <queue-id> <owning-thread|free>".
 */
894 static u8 *
895 format_cryptodev_inst (u8 * s, va_list * args)
896 {
898  u32 inst = va_arg (*args, u32);
899  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
900  u32 thread_index = 0;
901  struct rte_cryptodev_info info;
902 
903  rte_cryptodev_info_get (cit->dev_id, &info);
904  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);
905 
 /* find the worker (if any) currently bound to this dev/queue pair */
906  vec_foreach_index (thread_index, cmt->per_thread_data)
907  {
908  cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
909  if (vlib_num_workers () > 0 && thread_index == 0)
910  continue;
911 
912  if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
913  {
914  s = format (s, "%u (%v)\n", thread_index,
915  vlib_worker_threads[thread_index].name);
916  break;
917  }
918  }
919 
 /* loop ran to completion => nobody owns this instance */
920  if (thread_index == vec_len (cmt->per_thread_data))
921  s = format (s, "%s\n", "free");
922 
923  return s;
924 }
925 
/* CLI handler for "show cryptodev assignment": one table row per
 * cryptodev instance via format_cryptodev_inst. */
926 static clib_error_t *
928  vlib_cli_command_t * cmd)
929 {
931  u32 inst;
932 
933  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
934  "Assigned-to");
935  if (vec_len (cmt->cryptodev_inst) == 0)
936  {
937  vlib_cli_output (vm, "(nil)\n");
938  return 0;
939  }
940 
941  vec_foreach_index (inst, cmt->cryptodev_inst)
942  vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);
943 
944  return 0;
945 }
946 
947 VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
948  .path = "show cryptodev assignment",
949  .short_help = "show cryptodev assignment",
950  .function = cryptodev_show_assignment_fn,
951 };
952 
/* CLI handler for "set cryptodev assignment thread <t> resource <r>":
 * parses the pair, validates it, and rebinds the worker's cryptodev
 * queue via cryptodev_assign_resource. */
953 static clib_error_t *
955  vlib_cli_command_t * cmd)
956 {
959  unformat_input_t _line_input, *line_input = &_line_input;
960  u32 thread_index, inst_index;
961  u32 thread_present = 0, inst_present = 0;
962  clib_error_t *error = 0;
963  int ret;
964 
965  /* Get a line of input. */
966  if (!unformat_user (input, unformat_line_input, line_input))
967  return 0;
968 
 /* NOTE(review): no unformat_free (line_input) on any exit path --
  * looks like a leak of the line input buffer; verify upstream. */
969  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
970  {
971  if (unformat (line_input, "thread %u", &thread_index))
972  thread_present = 1;
973  else if (unformat (line_input, "resource %u", &inst_index))
974  inst_present = 1;
975  else
976  {
977  error = clib_error_return (0, "unknown input `%U'",
978  format_unformat_error, line_input);
979  return error;
980  }
981  }
982 
983  if (!thread_present || !inst_present)
984  {
985  error = clib_error_return (0, "mandatory argument(s) missing");
986  return error;
987  }
988 
 /* master thread never owns a cryptodev queue when workers exist */
989  if (thread_index == 0 && vlib_num_workers () > 0)
990  {
991  error =
992  clib_error_return (0, "assign crypto resource for master thread");
993  return error;
994  }
995 
 /* NOTE(review): '>' admits index == vec_len, one past the last valid
  * element -- '>=' looks intended; confirm against upstream fix. */
996  if (thread_index > vec_len (cmt->per_thread_data) ||
997  inst_index > vec_len (cmt->cryptodev_inst))
998  {
999  error = clib_error_return (0, "wrong thread id or resource id");
1000  return error;
1001  }
1002 
1003  cet = cmt->per_thread_data + thread_index;
1004  ret = cryptodev_assign_resource (cet, inst_index,
1006  if (ret)
1007  {
1008  error = clib_error_return (0, "cryptodev_assign_resource returned %i",
1009  ret);
1010  return error;
1011  }
1012 
1013  return 0;
1014 }
1015 
1016 VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
1017  .path = "set cryptodev assignment",
1018  .short_help = "set cryptodev assignment thread <thread_index> "
1019  "resource <inst_index>",
1020  .function = cryptodev_set_assignment_fn,
1021 };
1022 
/*
 * Verify that cryptodev dev_id advertises every algorithm this engine
 * registers: each AES-GCM variant (with matching IV/digest/AAD sizes)
 * and each cipher+HMAC pair.  Returns 0 if all are supported, otherwise
 * the negated DPDK algorithm id of the first unsupported one.
 */
1023 static int
1025 {
1026  const struct rte_cryptodev_symmetric_capability *cap;
1027  struct rte_cryptodev_sym_capability_idx cap_idx;
1028 
 /* one capability probe per AEAD table entry */
1029 #define _(a, b, c, d, e, f) \
1030  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b; \
1031  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c; \
1032  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
1033  if (!cap) \
1034  return -RTE_CRYPTO_##b##_##c; \
1035  else \
1036  { \
1037  if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
1038  return -RTE_CRYPTO_##b##_##c; \
1039  if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f) \
1040  return -RTE_CRYPTO_##b##_##c; \
1041  if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d) \
1042  return -RTE_CRYPTO_##b##_##c; \
1043  }
1044 
1046 #undef _
1047 
 /* one cipher + one auth capability probe per linked-alg table entry */
1048 #define _(a, b, c, d) \
1049  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; \
1050  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b; \
1051  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
1052  if (!cap) \
1053  return -RTE_CRYPTO_CIPHER_##b; \
1054  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; \
1055  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC; \
1056  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
1057  if (!cap) \
1058  return -RTE_CRYPTO_AUTH_##c;
1059 
1061 #undef _
1062  return 0;
1063 }
1064 
/*
 * Count the queue pairs available on symmetric-crypto-capable
 * cryptodevs that reside on the given NUMA node; devices on other
 * nodes are skipped with a warning.
 */
1065 static u32
1067 {
1068  struct rte_cryptodev_info info;
1069  u32 n_cryptodev = rte_cryptodev_count ();
1070  u32 i, q_count = 0;
1071 
1072  for (i = 0; i < n_cryptodev; i++)
1073  {
1074  rte_cryptodev_info_get (i, &info);
1075  if (rte_cryptodev_socket_id (i) != numa)
1076  {
1077  clib_warning ("DPDK crypto resource %s is in different numa node "
1078  "as %u, ignored", info.device->name, numa);
1079  continue;
1080  }
1081  /* only device support symmetric crypto is used */
1082  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
1083  continue;
1084  q_count += info.max_nb_queue_pairs;
1085  }
1086 
1087  return q_count;
1088 }
1089 
/*
 * Configure one cryptodev for use by this engine: verify symmetric
 * crypto support and algorithm coverage, configure + start the device
 * if it is not already running, and register one cryptodev_inst_t per
 * queue pair.  Returns 0 on success (including skip), negative on error.
 */
1090 static int
1091 cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
1092 {
1093  struct rte_cryptodev_info info;
1094  struct rte_cryptodev *cdev;
1097  vm->numa_node);
1098  u32 i;
1099  int ret;
1100 
1101  rte_cryptodev_info_get (cryptodev_id, &info);
1102 
1103  /* do not configure the device that does not support symmetric crypto */
1104  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
1105  return 0;
1106 
1107  ret = check_cryptodev_alg_support (cryptodev_id);
1108  if (ret != 0)
1109  return ret;
1110 
1111  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
1112  /** If the device is already started, we reuse it, otherwise configure
1113  * both the device and queue pair.
1114  **/
1115  if (!cdev->data->dev_started)
1116  {
1117  struct rte_cryptodev_config cfg;
1118 
1119  cfg.socket_id = vm->numa_node;
1120  cfg.nb_queue_pairs = info.max_nb_queue_pairs;
1121 
1122  rte_cryptodev_configure (cryptodev_id, &cfg);
1123 
1124  for (i = 0; i < info.max_nb_queue_pairs; i++)
1125  {
1126  struct rte_cryptodev_qp_conf qp_cfg;
1127 
1128  int ret;
1129 
1130  qp_cfg.mp_session = numa_data->sess_pool;
1131  qp_cfg.mp_session_private = numa_data->sess_priv_pool;
1132  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;
1133 
1134  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
1135  vm->numa_node);
1136  if (ret)
1137  break;
1138  }
1139  if (i != info.max_nb_queue_pairs)
1140  return -1;
1141  /* start the device */
 /* NOTE(review): this starts device index i (== max_nb_queue_pairs at
  * this point), not cryptodev_id -- looks like a bug; confirm against
  * the upstream fix before relying on it. */
1142  rte_cryptodev_start (i);
1143  }
1144 
 /* one engine instance (dev, queue) per configured queue pair */
1145  for (i = 0; i < cdev->data->nb_queue_pairs; i++)
1146  {
1147  cryptodev_inst_t *cdev_inst;
1148  vec_add2(cmt->cryptodev_inst, cdev_inst, 1);
1149  cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
1150  cdev_inst->dev_id = cryptodev_id;
1151  cdev_inst->q_id = i;
1152 
1153  snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
1154  "%s_q%u", info.device->name, i);
1155  }
1156 
1157  return 0;
1158 }
1159 
1160 static int
1161 cryptodev_cmp (void *v1, void *v2)
1162 {
1163  cryptodev_inst_t *a1 = v1;
1164  cryptodev_inst_t *a2 = v2;
1165 
1166  if (a1->q_id > a2->q_id)
1167  return 1;
1168  if (a1->q_id < a2->q_id)
1169  return -1;
1170  return 0;
1171 }
1172 
/*
 * Probe and configure every cryptodev on this NUMA node.  Fails fast
 * (-1) when the node does not expose at least one queue pair per
 * worker, since each worker needs a dedicated queue.
 */
1173 static int
1175 {
1177  u32 n_queues = cryptodev_count_queue (vm->numa_node);
1178  u32 i;
1179  int ret;
1180 
1181  /* If there is not enough queues, exit */
1182  if (n_queues < n_workers)
1183  return -1;
1184 
1185  for (i = 0; i < rte_cryptodev_count (); i++)
1186  {
1187  ret = cryptodev_configure (vm, i);
1188  if (ret)
1189  return ret;
1190  }
1191 
1193 
1194  return 0;
1195 }
1196 
/*
 * Return the largest private session size across all cryptodevs (the
 * session private pool must fit the most demanding PMD), or -1 when no
 * cryptodev is present.
 */
1197 static int
1199 {
1200  u32 sess_data_sz = 0, i;
1201 
1202  if (rte_cryptodev_count () == 0)
1203  return -1;
1204 
1205  for (i = 0; i < rte_cryptodev_count (); i++)
1206  {
1207  u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);
1208 
1209  sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
1210  }
1211 
1212  return sess_data_sz;
1213 }
1214 
1215 static void
1217 {
1219  cryptodev_numa_data_t *numa_data;
1220 
1221  vec_validate (cmt->per_numa_data, vm->numa_node);
1222  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
1223 
1224  if (numa_data->sess_pool)
1225  rte_mempool_free (numa_data->sess_pool);
1226  if (numa_data->sess_priv_pool)
1227  rte_mempool_free (numa_data->sess_priv_pool);
1228  if (numa_data->cop_pool)
1229  rte_mempool_free (numa_data->cop_pool);
1230 }
1231 
1232 static void
1233 crypto_op_init (struct rte_mempool *mempool,
1234  void *_arg __attribute__ ((unused)),
1235  void *_obj, unsigned i __attribute__ ((unused)))
1236 {
1237  struct rte_crypto_op *op = _obj;
1238 
1239  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
1240  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1241  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
1242  op->phys_addr = rte_mempool_virt2iova (_obj);
1243  op->mempool = mempool;
1244 }
1245 
1246 
1247 clib_error_t *
1249 {
1253  cryptodev_numa_data_t *numa_data;
1254  struct rte_mempool *mp;
1255  u32 skip_master = vlib_num_workers () > 0;
1256  u32 n_workers = tm->n_vlib_mains - skip_master;
1257  u32 numa = vm->numa_node;
1258  i32 sess_sz;
1259  u64 n_cop_elts;
1260  u32 eidx;
1261  u32 i;
1262  u8 *name = 0;
1264  struct rte_crypto_op_pool_private *priv;
1265 
1266  cmt->iova_mode = rte_eal_iova_mode ();
1267 
1268  sess_sz = cryptodev_get_session_sz(vm, n_workers);
1269  if (sess_sz < 0)
1270  {
1271  error = clib_error_return (0, "Not enough cryptodevs");
1272  return error;
1273  }
1274 
1275  /* A total of 4 times n_worker threads * frame size as crypto ops */
1276  n_cop_elts = max_pow2 ((u64)n_workers * CRYPTODEV_NB_CRYPTO_OPS);
1277 
1278  vec_validate (cmt->per_numa_data, vm->numa_node);
1279  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);
1280 
1281  /* create session pool for the numa node */
1282  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
1283  mp = rte_cryptodev_sym_session_pool_create ((char *) name,
1285  0, 0, 0, numa);
1286  if (!mp)
1287  {
1288  error = clib_error_return (0, "Not enough memory for mp %s", name);
1289  goto err_handling;
1290  }
1291  vec_free (name);
1292 
1293  numa_data->sess_pool = mp;
1294 
1295  /* create session private pool for the numa node */
1296  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
1297  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
1298  0, NULL, NULL, NULL, NULL, numa, 0);
1299  if (!mp)
1300  {
1301  error = clib_error_return (0, "Not enough memory for mp %s", name);
1302  vec_free (name);
1303  goto err_handling;
1304  }
1305 
1306  vec_free (name);
1307 
1308  numa_data->sess_priv_pool = mp;
1309 
1310  /* create cryptodev op pool */
1311  name = format (0, "cryptodev_op_pool_%u%c", numa, 0);
1312 
1313  mp = rte_mempool_create ((char *) name, n_cop_elts,
1314  sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2,
1315  sizeof (struct rte_crypto_op_pool_private), NULL,
1316  NULL, crypto_op_init, NULL, numa, 0);
1317  if (!mp)
1318  {
1319  error = clib_error_return (0, "Not enough memory for mp %s", name);
1320  vec_free (name);
1321  goto err_handling;
1322  }
1323 
1324  priv = rte_mempool_get_priv (mp);
1325  priv->priv_size = sizeof (struct rte_crypto_op_pool_private);
1326  priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1327  vec_free (name);
1328  numa_data->cop_pool = mp;
1329 
1330  /* probe all cryptodev devices and get queue info */
1331  if (cryptodev_probe (vm, n_workers) < 0)
1332  {
1333  error = clib_error_return (0, "Failed to configure cryptodev");
1334  goto err_handling;
1335  }
1336 
1338  clib_spinlock_init (&cmt->tlock);
1339 
1342  for (i = skip_master; i < tm->n_vlib_mains; i++)
1343  {
1344  ptd = cmt->per_thread_data + i;
1346  name = format (0, "frames_ring_%u%c", i, 0);
1347  ptd->ring = rte_ring_create((char *) name, CRYPTODEV_NB_CRYPTO_OPS,
1348  vm->numa_node, RING_F_SP_ENQ|RING_F_SC_DEQ);
1349  if (!ptd->ring)
1350  {
1351  error = clib_error_return (0, "Not enough memory for mp %s", name);
1352  vec_free (name);
1353  goto err_handling;
1354  }
1356  vec_free(name);
1357  }
1358 
1359  /* register handler */
1360  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
1361  "DPDK Cryptodev Engine");
1362 
1363 #define _(a, b, c, d, e, f) \
1364  vnet_crypto_register_async_handler \
1365  (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \
1366  cryptodev_enqueue_gcm_aad_##f##_enc,\
1367  cryptodev_frame_dequeue); \
1368  vnet_crypto_register_async_handler \
1369  (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \
1370  cryptodev_enqueue_gcm_aad_##f##_dec, \
1371  cryptodev_frame_dequeue);
1372 
1374 #undef _
1375 
1376 #define _(a, b, c, d) \
1377  vnet_crypto_register_async_handler \
1378  (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC, \
1379  cryptodev_enqueue_linked_alg_enc, \
1380  cryptodev_frame_dequeue); \
1381  vnet_crypto_register_async_handler \
1382  (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC, \
1383  cryptodev_enqueue_linked_alg_dec, \
1384  cryptodev_frame_dequeue);
1385 
1387 #undef _
1388 
1390 
1391  return 0;
1392 
1393 err_handling:
1395 
1396  return error;
1397 }
1398 /* *INDENT-On* */
1399 
1400 /*
1401  * fd.io coding-style-patch-verification: ON
1402  *
1403  * Local Variables:
1404  * eval: (c-set-style "gnu")
1405  * End:
1406  */
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:509
static_always_inline vnet_crypto_async_frame_t * cryptodev_frame_dequeue(vlib_main_t *vm, u32 *nb_elts_processed, u32 *enqueue_thread_idx)
Definition: cryptodev.c:708
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
struct rte_crypto_sym_op sop
Definition: cryptodev.c:95
static_always_inline int cryptodev_frame_linked_algs_enqueue(vlib_main_t *vm, vnet_crypto_async_frame_t *frame, cryptodev_op_type_t op_type)
Definition: cryptodev.c:468
#define vec_foreach_index(var, v)
Iterate over vector indices.
#define CRYPTODEV_NB_CRYPTO_OPS
Definition: cryptodev.c:41
enum rte_iova_mode iova_mode
Definition: cryptodev.c:143
static_always_inline int cryptodev_enqueue_linked_alg_dec(vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
Definition: cryptodev.c:816
#define CLIB_CACHE_LINE_ALIGN_MARK(mark)
Definition: cache.h:60
#define clib_min(x, y)
Definition: clib.h:328
static_always_inline void clib_spinlock_unlock(clib_spinlock_t *p)
Definition: lock.h:121
static_always_inline void clib_spinlock_lock(clib_spinlock_t *p)
Definition: lock.h:82
static_always_inline int cryptodev_enqueue_gcm_aad_8_enc(vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
Definition: cryptodev.c:778
cryptodev_op_type_t
Definition: cryptodev.c:102
cryptodev_resource_assign_op_t
Definition: cryptodev.c:823
static int prepare_aead_xform(struct rte_crypto_sym_xform *xform, cryptodev_op_type_t op_type, const vnet_crypto_key_t *key, u32 aad_len)
Definition: cryptodev.c:152
static clib_error_t * cryptodev_set_assignment_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
Definition: cryptodev.c:954
#define VNET_CRYPTO_KEY_TYPE_LINK
Definition: crypto.h:192
#define pool_get_zero(P, E)
Allocate an object E from a pool P and zero it.
Definition: pool.h:254
#define PREDICT_TRUE(x)
Definition: clib.h:122
vlib_physmem_main_t physmem_main
Definition: main.h:185
#define foreach_vnet_aead_crypto_conversion
Definition: cryptodev.c:49
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:110
unsigned long u64
Definition: types.h:89
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
#define VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS
Definition: crypto.h:239
void cryptodev_key_handler(vlib_main_t *vm, vnet_crypto_key_op_t kop, vnet_crypto_key_index_t idx)
Definition: cryptodev.c:401
static u32 cryptodev_count_queue(u32 numa)
Definition: cryptodev.c:1066
#define EINVAL
Definition: string.h:93
#define VLIB_BUFFER_PRE_DATA_SIZE
Definition: buffer.h:51
u32 thread_index
Definition: main.h:250
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
u8 aad[16]
Definition: cryptodev.c:97
static u32 clib_pmalloc_get_page_index(clib_pmalloc_main_t *pm, void *va)
Definition: pmalloc.h:128
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:630
static uword * clib_bitmap_set(uword *ai, uword i, uword value)
Sets the ith bit of a bitmap to new_value Removes trailing zeros from the bitmap. ...
Definition: bitmap.h:167
uword unformat_user(unformat_input_t *input, unformat_function_t *func,...)
Definition: unformat.c:989
vlib_main_t * vm
Definition: in2out_ed.c:1580
u32 numa_node
Definition: main.h:252
#define CRYPTODEV_AAD_OFFSET
Definition: cryptodev.c:46
struct rte_mempool * sess_pool
Definition: cryptodev.c:124
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
Definition: vec.h:520
unsigned char u8
Definition: types.h:56
cryptodev_inst_t * cryptodev_inst
Definition: cryptodev.c:144
u8 data[128]
Definition: ipsec_types.api:90
u8 buffer_pool_index
index of buffer pool this buffer belongs.
Definition: buffer.h:133
static_always_inline void cryptodev_sess_handler(vlib_main_t *vm, vnet_crypto_key_op_t kop, vnet_crypto_key_index_t idx, u32 aad_len)
Definition: cryptodev.c:328
clib_pmalloc_main_t * pmalloc_main
Definition: physmem.h:64
static uword clib_bitmap_set_no_check(uword *a, uword i, uword new_value)
Sets the ith bit of a bitmap to new_value.
Definition: bitmap.h:141
void vnet_crypto_register_key_handler(vlib_main_t *vm, u32 engine_index, vnet_crypto_key_handler_t *key_handler)
Definition: crypto.c:307
static int prepare_linked_xform(struct rte_crypto_sym_xform *xforms, cryptodev_op_type_t op_type, const vnet_crypto_key_t *key)
Definition: cryptodev.c:180
#define static_always_inline
Definition: clib.h:109
vnet_crypto_key_op_t
Definition: crypto.h:108
static const vnet_crypto_op_status_t cryptodev_status_conversion[]
Definition: cryptodev.c:85
static int cryptodev_configure(vlib_main_t *vm, uint32_t cryptodev_id)
Definition: cryptodev.c:1091
#define vec_new(T, N)
Create new vector of given type and length (unspecified alignment, no header).
Definition: vec.h:350
static_always_inline int cryptodev_frame_gcm_enqueue(vlib_main_t *vm, vnet_crypto_async_frame_t *frame, cryptodev_op_type_t op_type, u8 aad_len)
Definition: cryptodev.c:580
description fragment has unexpected format
Definition: map.api:433
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define clib_error_return(e, args...)
Definition: error.h:99
const cJSON *const b
Definition: cJSON.h:255
unsigned int u32
Definition: types.h:88
#define foreach_cryptodev_link_async_alg
crypto (alg, cryptodev_alg), hash (alg, digest-size)
Definition: cryptodev.c:60
static_always_inline void cryptodev_mark_frame_err_status(vnet_crypto_async_frame_t *f, vnet_crypto_op_status_t s)
Definition: cryptodev.c:408
#define VLIB_FRAME_SIZE
Definition: node.h:378
static void crypto_op_init(struct rte_mempool *mempool, void *_arg, void *_obj, unsigned i)
Definition: cryptodev.c:1233
vnet_crypto_alg_t
Definition: crypto.h:124
#define VNET_CRYPTO_FRAME_SIZE
Definition: crypto.h:21
unformat_function_t unformat_line_input
Definition: format.h:282
static void clib_spinlock_init(clib_spinlock_t *p)
Definition: lock.h:65
Definition: cJSON.c:84
vlib_worker_thread_t * vlib_worker_threads
Definition: threads.c:34
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:546
static u8 iv[]
Definition: aes_cbc.c:24
clib_spinlock_t tlock
Definition: cryptodev.c:146
clib_bitmap_t * active_cdev_inst_mask
Definition: cryptodev.c:145
static int cryptodev_session_create(vnet_crypto_key_t *const key, struct rte_mempool *sess_priv_pool, cryptodev_key_t *session_pair, u32 aad_len)
Definition: cryptodev.c:244
static int cryptodev_get_session_sz(vlib_main_t *vm, uint32_t n_workers)
Definition: cryptodev.c:1198
struct _unformat_input_t unformat_input_t
unsigned short u16
Definition: types.h:57
#define rte_mbuf_from_vlib_buffer(x)
Definition: buffer.h:19
u8 data_len
Definition: ikev2_types.api:24
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:301
static_always_inline int cryptodev_assign_resource(cryptodev_engine_thread_t *cet, u32 cryptodev_inst_index, cryptodev_resource_assign_op_t op)
assign a cryptodev resource to a worker.
Definition: cryptodev.c:837
#define PREDICT_FALSE(x)
Definition: clib.h:121
vnet_crypto_alg_t alg
Definition: crypto.h:182
struct rte_crypto_op op
Definition: cryptodev.c:94
cryptodev_numa_data_t * per_numa_data
Definition: cryptodev.c:140
vl_api_tunnel_mode_t mode
Definition: gre.api:48
clib_error_t * dpdk_cryptodev_init(vlib_main_t *vm)
Definition: cryptodev.c:1248
vnet_crypto_async_alg_t async_alg
Definition: crypto.h:188
u32 buffer_indices[VNET_CRYPTO_FRAME_SIZE]
Definition: crypto.h:332
uword * lookup_table
Definition: pmalloc.h:93
#define UNFORMAT_END_OF_INPUT
Definition: format.h:144
cryptodev_engine_thread_t * per_thread_data
Definition: cryptodev.c:142
static_always_inline int cryptodev_enqueue_gcm_aad_8_dec(vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
Definition: cryptodev.c:793
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
cryptodev_main_t cryptodev_main
Definition: cryptodev.c:149
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:380
cryptodev_op_t ** cops
Definition: cryptodev.c:134
static_always_inline cryptodev_op_t * cryptodev_get_ring_head(struct rte_ring *ring)
Definition: cryptodev.c:701
#define clib_warning(format, args...)
Definition: error.h:59
static_always_inline rte_iova_t cryptodev_get_iova(clib_pmalloc_main_t *pm, enum rte_iova_mode mode, void *data)
Definition: cryptodev.c:418
u8 data[]
Packet data.
Definition: buffer.h:181
#define VNET_CRYPTO_FRAME_STATE_SUCCESS
Definition: crypto.h:326
static uword max_pow2(uword x)
Definition: clib.h:244
static_always_inline int cryptodev_enqueue_gcm_aad_12_enc(vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
Definition: cryptodev.c:785
static uword clib_bitmap_get(uword *ai, uword i)
Gets the ith bit value from a bitmap.
Definition: bitmap.h:197
#define VNET_CRYPTO_FRAME_STATE_ELT_ERROR
Definition: crypto.h:327
struct rte_ring * ring
Definition: cryptodev.c:135
string name[64]
Definition: ip.api:44
#define clib_bitmap_vec_validate(v, i)
Definition: bitmap.h:112
static int cryptodev_probe(vlib_main_t *vm, u32 n_workers)
Definition: cryptodev.c:1174
#define VLIB_CLI_COMMAND(x,...)
Definition: cli.h:158
static void cryptodev_session_del(struct rte_cryptodev_sym_session *sess)
Definition: cryptodev.c:294
vnet_crypto_op_status_t
Definition: crypto.h:115
signed int i32
Definition: types.h:77
#define ASSERT(truth)
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
Definition: cli.c:696
struct rte_mempool * cop_pool
Definition: cryptodev.c:123
static u8 * format_cryptodev_inst(u8 *s, va_list *args)
Definition: cryptodev.c:895
static int cryptodev_cmp(void *v1, void *v2)
Definition: cryptodev.c:1161
struct rte_mempool * sess_priv_pool
Definition: cryptodev.c:125
static clib_error_t * cryptodev_show_assignment_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
Definition: cryptodev.c:927
u8 flags
share same VNET_CRYPTO_OP_FLAG_* values
Definition: crypto.h:317
u32 vnet_crypto_key_index_t
Definition: crypto.h:346
static_always_inline int cryptodev_enqueue_gcm_aad_12_dec(vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
Definition: cryptodev.c:800
static uword pointer_to_uword(const void *p)
Definition: types.h:131
static int check_cryptodev_alg_support(u32 dev_id)
Definition: cryptodev.c:1024
typedef key
Definition: ipsec_types.api:86
static uword clib_bitmap_count_set_bits(uword *ai)
Return the number of set bits in a bitmap.
Definition: bitmap.h:468
#define CRYPTODEV_IV_OFFSET
Definition: cryptodev.c:45
static_always_inline int cryptodev_enqueue_linked_alg_enc(vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
Definition: cryptodev.c:808
#define foreach_vnet_crypto_status_conversion
Definition: cryptodev.c:77
static_always_inline vnet_crypto_key_t * vnet_crypto_get_key(vnet_crypto_key_index_t index)
Definition: crypto.h:516
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1581
struct rte_mempool ** dpdk_no_cache_mempool_by_buffer_pool_index
Definition: buffer.c:34
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
#define vec_sort_with_function(vec, f)
Sort a vector using the supplied element comparison function.
Definition: vec.h:1055
u32 index
Definition: flow_types.api:221
static int cryptodev_check_supported_vnet_alg(vnet_crypto_key_t *key)
Definition: cryptodev.c:310
static void dpdk_disable_cryptodev_engine(vlib_main_t *vm)
Definition: cryptodev.c:1216
static_always_inline void cryptodev_validate_mbuf_chain(vlib_main_t *vm, struct rte_mbuf *mb, vlib_buffer_t *b)
Definition: cryptodev.c:430
u8 * format_unformat_error(u8 *s, va_list *va)
Definition: unformat.c:91
static vlib_thread_main_t * vlib_get_thread_main()
Definition: global_funcs.h:32
static u32 vlib_num_workers()
Definition: threads.h:377
#define CRYPTODEV_NB_SESSION
Definition: cryptodev.c:42
uword clib_bitmap_t
Definition: bitmap.h:50
#define vec_foreach(var, vec)
Vector iterator.
struct rte_cryptodev_sym_session * keys[CRYPTODEV_N_OP_TYPES]
Definition: cryptodev.c:111
vnet_crypto_async_frame_t * frame
Definition: cryptodev.c:98
cryptodev_key_t * keys
Definition: cryptodev.c:141
static uword clib_bitmap_first_clear(uword *ai)
Return the lowest numbered clear bit in a bitmap.
Definition: bitmap.h:451
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 vnet_crypto_register_engine(vlib_main_t *vm, char *name, int prio, char *desc)
Definition: crypto.c:112
vl_api_ikev2_keys_t keys
volatile u8 ref_count
Reference count for this buffer.
Definition: buffer.h:130
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:978
vnet_crypto_op_status_t status
Definition: crypto.h:303
vnet_crypto_async_frame_elt_t elts[VNET_CRYPTO_FRAME_SIZE]
Definition: crypto.h:331
static uword unformat_check_input(unformat_input_t *i)
Definition: format.h:170
signed short i16
Definition: types.h:46