#include <vpp/app/version.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_NB_SESSION	10240
#define CRYPTODEV_DEF_DRIVE	crypto_aesni_mb

#define CRYPTODEV_IV_OFFSET	(offsetof (cryptodev_op_t, iv))
#define CRYPTODEV_AAD_OFFSET	(offsetof (cryptodev_op_t, aad))
#define CRYPTODEV_DIGEST_OFFSET (offsetof (cryptodev_op_t, digest))

/* VNET_CRYPTO_ALG, TYPE, DPDK_CRYPTO_ALG, IV_LEN, TAG_LEN, AAD_LEN */
#define foreach_vnet_aead_crypto_conversion \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)

/**
 * crypto (alg, cryptodev_alg), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg \
  _ (AES_128_CBC, AES_CBC, SHA1, 12)   \
  _ (AES_192_CBC, AES_CBC, SHA1, 12)   \
  _ (AES_256_CBC, AES_CBC, SHA1, 12)   \
  _ (AES_128_CBC, AES_CBC, SHA224, 14) \
  _ (AES_192_CBC, AES_CBC, SHA224, 14) \
  _ (AES_256_CBC, AES_CBC, SHA224, 14) \
  _ (AES_128_CBC, AES_CBC, SHA256, 16) \
  _ (AES_192_CBC, AES_CBC, SHA256, 16) \
  _ (AES_256_CBC, AES_CBC, SHA256, 16) \
  _ (AES_128_CBC, AES_CBC, SHA384, 24) \
  _ (AES_192_CBC, AES_CBC, SHA384, 24) \
  _ (AES_256_CBC, AES_CBC, SHA384, 24) \
  _ (AES_128_CBC, AES_CBC, SHA512, 32) \
  _ (AES_192_CBC, AES_CBC, SHA512, 32) \
  _ (AES_256_CBC, AES_CBC, SHA512, 32)

#define foreach_vnet_crypto_status_conversion \
  _(SUCCESS, COMPLETED)                       \
  _(NOT_PROCESSED, WORK_IN_PROGRESS)          \
  _(AUTH_FAILED, FAIL_BAD_HMAC)               \
  _(INVALID_SESSION, FAIL_ENGINE_ERR)         \
  _(INVALID_ARGS, FAIL_ENGINE_ERR)            \
  _(ERROR, FAIL_ENGINE_ERR)

static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_crypto_op op;
  struct rte_crypto_sym_op sop;
  u8 iv[16];
  u8 aad[16];
  u8 digest[16];	/* referenced by CRYPTODEV_DIGEST_OFFSET */
  vnet_crypto_async_frame_t *frame;
  u32 n_elts;
} cryptodev_op_t;
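/* Reference sketch (hand-expanded for illustration): the _() expansion
 * above yields a table indexed directly by the DPDK rte_crypto_op_status
 * enum, whose first six values line up with the foreach list, so the
 * dequeue path can translate with a single array lookup:
 *
 *   cryptodev_status_conversion[RTE_CRYPTO_OP_STATUS_SUCCESS]
 *     == VNET_CRYPTO_OP_STATUS_COMPLETED
 *   cryptodev_status_conversion[RTE_CRYPTO_OP_STATUS_AUTH_FAILED]
 *     == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC
 */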
static int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type,
		    const vnet_crypto_key_t *key, u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}
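/* Hypothetical usage sketch (the caller and error handling are illustrative,
 * not from this file): build the encrypt-direction xform for an AES-GCM key
 * with an 8-byte AAD, as IPsec ESP without ESN would use.
 *
 *   struct rte_crypto_sym_xform xform;
 *   vnet_crypto_key_t *key = vnet_crypto_get_key (key_index);
 *   if (prepare_aead_xform (&xform, CRYPTODEV_OP_TYPE_ENCRYPT, key, 8) < 0)
 *     return -1;   // non-GCM key: fall back to another engine
 */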
static int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d)                         \
    case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:  \
      cipher_algo = RTE_CRYPTO_CIPHER_##b;    \
      auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
      digest_len = d;                         \
      break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
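/* Illustrative expansion (worked out by hand from the foreach list above):
 * for the AES_128_CBC/SHA1 entry the _() macro generates
 *
 *   case VNET_CRYPTO_ALG_AES_128_CBC_SHA1_TAG12:
 *     cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC;
 *     auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
 *     digest_len = 12;
 *     break;
 */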
static int
cryptodev_session_create (vnet_crypto_key_t *const key,
			  struct rte_mempool *sess_priv_pool,
			  cryptodev_key_t *session_pair, u32 aad_len)
{
  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
  struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev *cdev;
  int ret;
  uint8_t dev_id = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
			      aad_len);
  if (ret)
    return ret;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      dev_id = dev_inst->dev_id;
      cdev = rte_cryptodev_pmd_get_dev (dev_id);

      /* if the session is already initialized for this driver type, skip it
       * so its private data is not set up twice */
      if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
	  session_pair->keys[1]->sess_data[cdev->driver_id].data)
	continue;

      ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
					    xforms_enc, sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
					    xforms_dec, sess_priv_pool);
      if (ret < 0)
	return ret;
    }

  session_pair->keys[0]->opaque_data = aad_len;
  session_pair->keys[1]->opaque_data = aad_len;

  return 0;
}
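/* Design note: rte_cryptodev_sym_session offers no user field other than
 * opaque_data, so the AAD length the session pair was created with is
 * stashed there. The GCM enqueue path compares it against the frame's AAD
 * length and re-creates the session when the two differ, e.g. when IPsec
 * switches between 8-byte and 12-byte ESP AAD (without/with ESN). */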
static void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}
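/* The clear-before-free order is required by the DPDK API: each
 * rte_cryptodev_sym_session_init() attaches per-driver private data to the
 * session, and rte_cryptodev_sym_session_free() refuses to release a
 * session that still has driver private data attached. Clearing on every
 * device id is a safe over-approximation of the devices the session was
 * actually initialized on. */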
static int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  vnet_crypto_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->alg;

#define _(a, b, c, d, e, f)       \
  if (alg == VNET_CRYPTO_ALG_##a) \
    return 0;

  foreach_vnet_aead_crypto_conversion
#undef _
  return -1;
}

static_always_inline void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = 0;
  int ret = 0;

  /* ... VNET_CRYPTO_KEY_OP_DEL and _MODIFY handling elided; for _ADD: ... */
  pool_get_zero (cmt->keys, ckey);

  /* do not create a session for an unsupported algorithm */
  if (cryptodev_check_supported_vnet_alg (key))
    return;

  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
  sess_pool = numa_data->sess_pool;
  sess_priv_pool = numa_data->sess_priv_pool;

  ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
  if (!ckey->keys[0])
    {
      ret = -1;
      goto clear_key;
    }

  ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
  if (!ckey->keys[1])
    {
      ret = -1;
      goto clear_key;
    }

  ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (ckey->keys[0]);
      cryptodev_session_del (ckey->keys[1]);
      memset (ckey, 0, sizeof (*ckey));
      pool_put (cmt->keys, ckey);
    }
}

static void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t *f,
				 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
  f->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
}
static_always_inline rte_iova_t
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
			       vlib_buffer_t *b, u8 *digest)
{
  rte_iova_t digest_iova = 0;
  struct rte_mbuf *first_mb = mb, *last_mb = mb;

  first_mb->nb_segs = 1;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);

      /* revert the mbuf to a freshly-allocated state */
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
      else
	rte_pktmbuf_reset (mb);

      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;

      /* the digest lives in this segment: compute its bus address */
      if (b->data <= digest &&
	  b->data + b->current_data + b->current_length > digest)
	digest_iova = rte_pktmbuf_iova (mb) + digest -
	  rte_pktmbuf_mtod (mb, u8 *);
    }

  return digest_iova;
}
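/* Why the walk above matters: rte_crypto expects auth.digest.phys_addr /
 * aead.digest.phys_addr to be a bus address. For a single-segment packet it
 * can be derived from the head mbuf directly, but when VPP chains buffers
 * the digest may live in any segment, so the vlib chain is converted into
 * an mbuf chain and the IOVA is computed from the segment that actually
 * contains the digest pointer. */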
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type,
				     u32 digest_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue, n_elts;
  vnet_crypto_key_t *key;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      /* ... prefetch of cop[1..2]/fe[1..2] and cached key lookup elided ... */
      key = vnet_crypto_get_key (fe->key_index);

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_dst = 0;
      /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
       * so we have to manually adjust mbuf data_off here so cryptodev can
       * correctly compute the data pointer. The prepend here will be later
       * rewritten by tx. */
      if (PREDICT_TRUE (fe->integ_start_offset < 0))
	{
	  rte_pktmbuf_prepend (sop->m_src, -fe->integ_start_offset);
	  integ_offset = 0;
	  crypto_offset = offset_diff;
	}

      sop->session = key->keys[op_type];
      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      if (PREDICT_TRUE (!(fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)))
	sop->auth.digest.phys_addr = rte_pktmbuf_iova (sop->m_src) +
	  fe->digest - rte_pktmbuf_mtod (sop->m_src, u8 *);
      else
	sop->auth.digest.phys_addr =
	  cryptodev_validate_mbuf_chain (vm, sop->m_src, b, fe->digest);
      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);

      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
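/* A note on the offset arithmetic above: DPDK measures cipher.data.offset
 * and auth.data.offset from the start of the mbuf data, while VPP hands the
 * engine buffer-relative offsets where the integrity region (e.g. the ESP
 * header) may start before the current-data pointer. Prepending the mbuf by
 * -integ_start_offset rebases both regions, so integ starts at 0 and the
 * cipher keeps its relative distance (offset_diff). */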
static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t *vm,
			     vnet_crypto_async_frame_t *frame,
			     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue = 0, n_elts;
  vnet_crypto_key_t *key;
  u8 sess_aad_len;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;
  frame->state = VNET_CRYPTO_OP_STATUS_COMPLETED;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      u16 crypto_offset = fe->crypto_start_offset;

      /* ... prefetch elided; the key (and thus the session) is refreshed
       * whenever fe->key_index changes, and the session is re-created when
       * its recorded AAD length no longer matches the frame's ... */
      key = vnet_crypto_get_key (fe->key_index);
      sess_aad_len = (u8) key->keys[op_type]->opaque_data;
      if (PREDICT_FALSE (sess_aad_len != aad_len))
	cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
				fe->key_index, aad_len);

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_dst = 0;
      sop->session = key->keys[op_type];
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      if (PREDICT_TRUE (!(fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)))
	sop->aead.digest.phys_addr = rte_pktmbuf_iova (sop->m_src) +
	  fe->tag - rte_pktmbuf_mtod (sop->m_src, u8 *);
      else
	sop->aead.digest.phys_addr =
	  cryptodev_validate_mbuf_chain (vm, sop->m_src, b, fe->tag);
      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);

      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
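/* Both enqueue paths are all-or-nothing by design: they bail out up front
 * if either the inflight budget or the op mempool cannot cover the whole
 * frame, and otherwise submit exactly frame->n_elts ops in one burst. This
 * keeps a frame's ops contiguous in the completion ring, which is what the
 * dequeue side relies on (only the first op of a frame carries the frame
 * pointer and element count). */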
static_always_inline cryptodev_op_t *
cryptodev_get_ring_head (struct rte_ring *ring)
{
  cryptodev_op_t **r = (void *) &ring[1];
  return r[ring->cons.head & ring->mask];
}
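/* Peeks at the oldest pointer in the ring without consuming it: an
 * rte_ring's object array lives immediately after the ring header
 * (&ring[1]), and cons.head & mask indexes the entry the next dequeue would
 * return. This leans on the ring layout rather than the public API (which
 * has no peek operation); it is safe here because the ring is created
 * single-producer/single-consumer and only touched by the owning thread. */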
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_op_t *cop0, **cop = cet->cops;
  vnet_crypto_async_frame_elt_t *fe;
  vnet_crypto_async_frame_t *frame;
  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0;	/* sum of status */

  if (cet->inflight)
    {
      n_elts = clib_min (CRYPTODEV_NB_CRYPTO_OPS - n_completed_ops,
			 VNET_CRYPTO_FRAME_SIZE);
      n_elts = rte_cryptodev_dequeue_burst
	(cet->cryptodev_id, cet->cryptodev_q,
	 (struct rte_crypto_op **) cet->cops, n_elts);
      cet->inflight -= n_elts;
      n_completed_ops += n_elts;

      rte_ring_sp_enqueue_burst (cet->ring, (void *) cet->cops,
				 n_elts, NULL);
    }

  if (PREDICT_FALSE (n_completed_ops == 0))
    return 0;

  cop0 = cryptodev_get_ring_head (cet->ring);
  /* not a single frame is finished */
  if (PREDICT_FALSE (cop0->n_elts > rte_ring_count (cet->ring)))
    return 0;

  frame = cop0->frame;
  n_elts = cop0->n_elts;
  n_elts = rte_ring_sc_dequeue_bulk (cet->ring, (void **) cet->cops,
				     n_elts, 0);
  fe = frame->elts;

  while (n_elts > 4)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];

      cop += 4;
      fe += 4;
      n_elts -= 4;
    }

  while (n_elts--)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe++;
      cop++;
    }

  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
    VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  rte_mempool_put_bulk (numa->cop_pool, (void **) cet->cops, frame->n_elts);

  return frame;
}
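/* The ss0..ss3 accumulators implement a branch-free "did everything
 * complete?" check: each element's converted status is both stored into its
 * frame element and OR-ed into an accumulator, four lanes at a time. If
 * every op mapped to COMPLETED, the OR of all lanes still equals
 * VNET_CRYPTO_OP_STATUS_COMPLETED; any other status value contributes
 * differing bits, the comparison fails, and the frame is flagged with
 * VNET_CRYPTO_FRAME_STATE_ELT_ERROR without a per-element branch. */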
/* generate the GCM enqueue wrappers, one per alg/AAD-length/direction */
#define _(a, b, c, d, e, f)                                                \
  static_always_inline int                                                 \
  cryptodev_enqueue_##a##_AAD##f##_enc (vlib_main_t * vm,                  \
					vnet_crypto_async_frame_t * frame) \
  {                                                                        \
    return cryptodev_frame_gcm_enqueue (vm, frame,                         \
					CRYPTODEV_OP_TYPE_ENCRYPT, f);     \
  }                                                                        \
  static_always_inline int                                                 \
  cryptodev_enqueue_##a##_AAD##f##_dec (vlib_main_t * vm,                  \
					vnet_crypto_async_frame_t * frame) \
  {                                                                        \
    return cryptodev_frame_gcm_enqueue (vm, frame,                         \
					CRYPTODEV_OP_TYPE_DECRYPT, f);     \
  }

foreach_vnet_aead_crypto_conversion
#undef _

/* generate the linked cipher+HMAC enqueue wrappers */
#define _(a, b, c, d)                                                        \
  static_always_inline int                                                   \
  cryptodev_enqueue_##a##_##c##_TAG##d##_enc (vlib_main_t * vm,              \
					      vnet_crypto_async_frame_t *    \
					      frame)                         \
  {                                                                          \
    return cryptodev_frame_linked_algs_enqueue (vm, frame,                   \
						CRYPTODEV_OP_TYPE_ENCRYPT,   \
						d);                          \
  }                                                                          \
  static_always_inline int                                                   \
  cryptodev_enqueue_##a##_##c##_TAG##d##_dec (vlib_main_t * vm,              \
					      vnet_crypto_async_frame_t *    \
					      frame)                         \
  {                                                                          \
    return cryptodev_frame_linked_algs_enqueue (vm, frame,                   \
						CRYPTODEV_OP_TYPE_DECRYPT,   \
						d);                          \
  }

foreach_cryptodev_link_async_alg
#undef _

/**
 * assign a cryptodev resource to a worker.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t *cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assigning a resource is only allowed when no op is inflight */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
	  vec_len (cmt->cryptodev_inst))
	return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = cmt->cryptodev_inst + idx;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* ... release of the previously held entry elided ... */
      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
				cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }

  return 0;
}
static u8 *
format_cryptodev_inst (u8 *s, va_list *args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      if (vlib_num_workers () > 0 && thread_index == 0)
	continue;

      if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
	{
	  s = format (s, "%u (%v)\n", thread_index,
		      vlib_worker_threads[thread_index].name);
	  break;
	}
    }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			      vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
		   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			     vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  /* ... thread/resource index validation elided ... */

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
				   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    error = clib_error_return (0, "cryptodev_assign_resource returned %d",
			       ret);

  return error;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
		  "resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
static int
check_cryptodev_alg_support (u32 dev_id)
{
  const struct rte_cryptodev_symmetric_capability *cap;
  struct rte_cryptodev_sym_capability_idx cap_idx;

#define _(a, b, c, d, e, f)                                               \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b;                                \
  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c;                               \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx);              \
  if (!cap)                                                               \
    return -RTE_CRYPTO_##b##_##c;                                         \
  else                                                                    \
    {                                                                     \
      if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
	return -RTE_CRYPTO_##b##_##c;                                     \
      if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f)       \
	return -RTE_CRYPTO_##b##_##c;                                     \
      if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d)         \
	return -RTE_CRYPTO_##b##_##c;                                     \
    }

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d)                                           \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                   \
  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                  \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx);    \
  if (!cap)                                                     \
    return -RTE_CRYPTO_CIPHER_##b;                              \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                     \
  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC;               \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx);    \
  if (!cap)                                                     \
    return -RTE_CRYPTO_AUTH_##c;

  foreach_cryptodev_link_async_alg
#undef _

  return 0;
}

static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  u32 i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      if (rte_cryptodev_socket_id (i) != numa)
	{
	  clib_warning ("DPDK crypto resource %s is on a different numa "
			"node than %u, ignored", info.device->name, numa);
	  continue;
	}
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}
static int
cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
{
  struct rte_cryptodev_info info;
  struct rte_cryptodev *cdev;
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
						       vm->numa_node);
  u32 i;
  int ret;

  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
  rte_cryptodev_info_get (cryptodev_id, &info);

  ret = check_cryptodev_alg_support (cryptodev_id);
  if (ret != 0)
    return ret;

  /* if the device is already started it is reused as-is; otherwise
   * configure both the device and all its queue pairs */
  if (!cdev->data->dev_started)
    {
      struct rte_cryptodev_config cfg;

      cfg.socket_id = vm->numa_node;
      cfg.nb_queue_pairs = info.max_nb_queue_pairs;

      rte_cryptodev_configure (cryptodev_id, &cfg);

      for (i = 0; i < info.max_nb_queue_pairs; i++)
	{
	  struct rte_cryptodev_qp_conf qp_cfg;

	  qp_cfg.mp_session = numa_data->sess_pool;
	  qp_cfg.mp_session_private = numa_data->sess_priv_pool;
	  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

	  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
						vm->numa_node);
	  if (ret)
	    break;
	}
      if (i != info.max_nb_queue_pairs)
	return -1;

      /* start the device */
      rte_cryptodev_start (cryptodev_id);
    }

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
		"%s_q%u", info.device->name, i);
    }

  return 0;
}
static int
cryptodev_create_device (vlib_main_t *vm, u32 n_queues)
{
  char name[RTE_CRYPTODEV_NAME_MAX_LEN], args[128];
  u32 dev_id = 0;
  int ret;

  /* find an unused name for the new device */
  while (dev_id < RTE_CRYPTO_MAX_DEVS)
    {
      snprintf (name, RTE_CRYPTODEV_NAME_MAX_LEN - 1, "%s%u",
		RTE_STR (CRYPTODEV_DEF_DRIVE), dev_id);
      if (rte_cryptodev_get_dev_id (name) < 0)
	break;
      dev_id++;
    }

  if (dev_id == RTE_CRYPTO_MAX_DEVS)
    return -1;

  snprintf (args, 127, "socket_id=%u,max_nb_queue_pairs=%u",
	    vm->numa_node, n_queues);

  ret = rte_vdev_init (name, args);
  if (ret < 0)
    return ret;

  clib_warning ("Created cryptodev device %s (%s)", name, args);

  return 0;
}
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;
  int ret;

  /* create a software vdev if the local queues cannot cover all workers */
  if (n_queues < n_workers)
    {
      ret = cryptodev_create_device (vm, n_workers);
      if (ret < 0)
	return ret;
    }

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      ret = cryptodev_configure (vm, i);
      if (ret)
	return ret;
    }

  return 0;
}
static int
cryptodev_get_session_sz (vlib_main_t *vm, uint32_t n_workers)
{
  u32 sess_data_sz = 0, i;
  int ret;

  if (rte_cryptodev_count () == 0)
    {
      clib_warning ("No cryptodev device available, creating...");
      ret = cryptodev_create_device (vm, max_pow2 (n_workers));
      if (ret < 0)
	return ret;
    }

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);

      sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
    }

  return sess_data_sz;
}
static void
dpdk_disable_cryptodev_engine (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);

  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);
  if (numa_data->cop_pool)
    rte_mempool_free (numa_data->cop_pool);
}
static void
crypto_op_init (struct rte_mempool *mempool,
		void *_arg __attribute__ ((unused)),
		void *_obj, unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}
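/* crypto_op_init is registered below as the obj_init callback of
 * rte_mempool_create, so every element is stamped with its type, session
 * mode and bus address exactly once at pool-population time; the datapath
 * never re-initializes these fields per packet, it only fills in the
 * symmetric-op payload. */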
clib_error_t *
dpdk_cryptodev_init (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *ptd;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  struct rte_crypto_op_pool_private *priv;
  u32 numa = vm->numa_node;
  u32 n_workers = vlib_num_workers ();
  i32 sess_sz;
  u32 n_cop_elts;
  u32 eidx;
  u32 i;
  u8 *name = 0;
  clib_error_t *error;

  sess_sz = cryptodev_get_session_sz (vm, n_workers);
  if (sess_sz < 0)
    return clib_error_return (0, "Not enough cryptodevs");

  n_cop_elts = max_pow2 (n_workers * CRYPTODEV_NB_CRYPTO_OPS);

  vec_validate (cmt->per_numa_data, numa);
  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

  /* create session pool for the numa node */
  name = format (0, "vcryptodev_sess_pool_%u", numa);
  mp = rte_cryptodev_sym_session_pool_create ((char *) name,
					      CRYPTODEV_NB_SESSION, 0, 0, 0,
					      numa);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto err_handling;
    }
  vec_free (name);
  numa_data->sess_pool = mp;

  /* create session private pool, sized for the largest device session */
  name = format (0, "cryptodev_sess_pool_%u", numa);
  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
			   0, NULL, NULL, NULL, NULL, numa, 0);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      vec_free (name);
      goto err_handling;
    }
  vec_free (name);
  numa_data->sess_priv_pool = mp;

  /* create the crypto op pool; crypto_op_init pre-stamps every element */
  name = format (0, "cryptodev_op_pool_%u", numa);
  mp = rte_mempool_create ((char *) name, n_cop_elts,
			   sizeof (cryptodev_op_t),
			   VNET_CRYPTO_FRAME_SIZE * 2,
			   sizeof (struct rte_crypto_op_pool_private), NULL,
			   NULL, crypto_op_init, NULL, numa, 0);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      vec_free (name);
      goto err_handling;
    }
  priv = rte_mempool_get_priv (mp);
  priv->priv_size = sizeof (struct rte_crypto_op_pool_private);
  priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  numa_data->cop_pool = mp;
  vec_free (name);

  /* probe and configure all cryptodev devices */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Failed to configure cryptodev");
      goto err_handling;
    }

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  for (i = vlib_num_workers () > 0; i < tm->n_vlib_mains; i++)
    {
      ptd = cmt->per_thread_data + i;
      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
      name = format (0, "frames_ring_%u", i);
      ptd->ring = rte_ring_create ((char *) name, CRYPTODEV_NB_CRYPTO_OPS,
				   vm->numa_node,
				   RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (!ptd->ring)
	{
	  error = clib_error_return (0, "Not enough memory for mp %s", name);
	  vec_free (name);
	  goto err_handling;
	}
      vec_validate (ptd->cops, VNET_CRYPTO_FRAME_SIZE - 1);
      vec_free (name);
    }

  /* register the engine and one handler pair per supported async op id */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
				      "DPDK Cryptodev Engine");

#define _(a, b, c, d, e, f)                                  \
  vnet_crypto_register_async_handler                         \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,   \
     cryptodev_enqueue_##a##_AAD##f##_enc,                   \
     cryptodev_frame_dequeue);                               \
  vnet_crypto_register_async_handler                         \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,   \
     cryptodev_enqueue_##a##_AAD##f##_dec,                   \
     cryptodev_frame_dequeue);

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d)                                        \
  vnet_crypto_register_async_handler                         \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC,      \
     cryptodev_enqueue_##a##_##c##_TAG##d##_enc,             \
     cryptodev_frame_dequeue);                               \
  vnet_crypto_register_async_handler                         \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC,      \
     cryptodev_enqueue_##a##_##c##_TAG##d##_dec,             \
     cryptodev_frame_dequeue);

  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  return 0;

err_handling:
  dpdk_disable_cryptodev_engine (vm);
  return error;
}
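/* Worked example of the registration macro above (hand-expanded for the
 * AES_128_GCM / TAG16 / AAD8 entry of foreach_vnet_aead_crypto_conversion):
 *
 *   vnet_crypto_register_async_handler
 *     (vm, eidx, VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD8_ENC,
 *      cryptodev_enqueue_AES_128_GCM_AAD8_enc, cryptodev_frame_dequeue);
 *
 * i.e. every async op id gets its matching generated enqueue wrapper, and
 * all of them share the single cryptodev_frame_dequeue poller. */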