/* ... */
#include <vpp/app/version.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_NB_SESSION    10240
#define CRYPTODEV_DEF_DRIVE     crypto_aesni_mb

#define CRYPTODEV_IV_OFFSET  (offsetof (cryptodev_op_t, iv))
#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))

/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */
#define foreach_vnet_aead_crypto_conversion \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)

/* crypto (alg, cryptodev_alg), hash (alg, digest-size) */
#define foreach_cryptodev_link_async_alg \
  _ (AES_128_CBC, AES_CBC, SHA1, 12)   \
  _ (AES_192_CBC, AES_CBC, SHA1, 12)   \
  _ (AES_256_CBC, AES_CBC, SHA1, 12)   \
  _ (AES_128_CBC, AES_CBC, SHA224, 14) \
  _ (AES_192_CBC, AES_CBC, SHA224, 14) \
  _ (AES_256_CBC, AES_CBC, SHA224, 14) \
  _ (AES_128_CBC, AES_CBC, SHA256, 16) \
  _ (AES_192_CBC, AES_CBC, SHA256, 16) \
  _ (AES_256_CBC, AES_CBC, SHA256, 16) \
  _ (AES_128_CBC, AES_CBC, SHA384, 24) \
  _ (AES_192_CBC, AES_CBC, SHA384, 24) \
  _ (AES_256_CBC, AES_CBC, SHA384, 24) \
  _ (AES_128_CBC, AES_CBC, SHA512, 32) \
  _ (AES_192_CBC, AES_CBC, SHA512, 32) \
  _ (AES_256_CBC, AES_CBC, SHA512, 32)

#define foreach_vnet_crypto_status_conversion \
  _(SUCCESS, COMPLETED)                       \
  _(NOT_PROCESSED, WORK_IN_PROGRESS)          \
  _(AUTH_FAILED, FAIL_BAD_HMAC)               \
  _(INVALID_SESSION, FAIL_ENGINE_ERR)         \
  _(INVALID_ARGS, FAIL_ENGINE_ERR)            \
  _(ERROR, FAIL_ENGINE_ERR)

static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_crypto_op op;
  struct rte_crypto_sym_op sop;
  u8 iv[16];
  u8 aad[16];
  vnet_crypto_async_frame_t *frame;
  u32 n_elts;
} cryptodev_op_t;
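/* The iv and aad scratch areas live inside cryptodev_op_t itself, which is
 * why CRYPTODEV_IV_OFFSET and CRYPTODEV_AAD_OFFSET are plain offsetof ()
 * values: the PMD can locate them at a fixed offset from each op, and
 * their IOVAs follow directly from op.phys_addr. */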
/* ... cryptodev_op_type_t, cryptodev_key_t, cryptodev_inst_t,
 * cryptodev_numa_data_t and cryptodev_engine_thread_t definitions
 * elided ... */

typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  enum rte_iova_mode iova_mode;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
} cryptodev_main_t;

cryptodev_main_t cryptodev_main;
static int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type,
		    const vnet_crypto_key_t *key, u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}
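/* Illustrative call: prepare_aead_xform (&xform, CRYPTODEV_OP_TYPE_ENCRYPT,
 * key, 8) yields an AES-GCM encrypt xform with an 8-byte AAD, a 12-byte IV
 * read from CRYPTODEV_IV_OFFSET within each op, and a 16-byte tag. */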
static int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d)                         \
  case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:    \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;      \
    auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC;   \
    digest_len = d;                           \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
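/* The two xforms are chained through xforms->next. For encryption the
 * cipher xform runs first and the digest is generated over the result;
 * for decryption the digest is verified first, hence the slot swap above. */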
static int
cryptodev_session_create (vnet_crypto_key_t *const key,
			  struct rte_mempool *sess_priv_pool,
			  cryptodev_key_t *session_pair, u32 aad_len)
{
  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
  struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev *cdev;
  int ret;
  uint8_t dev_id = 0;

  /* build the encrypt/decrypt xforms: linked (cipher + auth) keys go
   * through prepare_linked_xform (), AEAD keys through
   * prepare_aead_xform () */
  /* ... */

  vec_foreach (dev_inst, cmt->cryptodev_inst)
  {
    dev_id = dev_inst->dev_id;
    cdev = rte_cryptodev_pmd_get_dev (dev_id);

    /* if the session is already initialized for this driver type, skip it
     * so the session data's refcnt is not increased twice */
    if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
	session_pair->keys[1]->sess_data[cdev->driver_id].data)
      continue;

    ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
					  xforms_enc, sess_priv_pool);
    ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
					  xforms_dec, sess_priv_pool);
    if (ret < 0)
      return ret;
  }
  session_pair->keys[0]->opaque_data = aad_len;
  session_pair->keys[1]->opaque_data = aad_len;

  return 0;
}
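/* opaque_data doubles as a record of the AAD length the session pair was
 * created with; the GCM enqueue path reads it back (sess_aad_len below) to
 * spot frames whose AAD length no longer matches the session. */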
static void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}
static int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  vnet_crypto_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->alg;

#define _(a, b, c, d, e, f)       \
  if (alg == VNET_CRYPTO_ALG_##a) \
    return 0;

  foreach_vnet_aead_crypto_conversion
#undef _
    return -1;
}

static_always_inline void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = 0;
  int ret = 0;

  /* ... key-add path: allocate a session pair from the numa-local pools */
  ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
  /* ... */
  ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
  /* ... key-del path: free both sessions, then wipe the pair */
  memset (ckey, 0, sizeof (*ckey));
  /* ... */
}

void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t *f,
				 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
  f->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
}
static_always_inline rte_iova_t
cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode,
		    void *data)
{
  u64 index;
  if (mode == RTE_IOVA_VA)
    return (rte_iova_t) pointer_to_uword (data);

  index = clib_pmalloc_get_page_index (pm, data);
  return pointer_to_uword (data) - pm->lookup_table[index];
}
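/* With RTE_IOVA_VA the IOVA is simply the virtual address; with physical
 * addressing it is recovered through the pmalloc page lookup table. */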
static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
			       vlib_buffer_t *b)
{
  struct rte_mbuf *first_mb = mb, *last_mb = mb;

  /* when the input node is not dpdk-input, the mbuf data length is not
   * initialized, so a reference data length has to be computed from the
   * vlib_buffer for each segment in the chain */
  u16 data_len = b->current_length +
    (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  first_mb->nb_segs = 1;
  first_mb->pkt_len = first_mb->data_len = data_len;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (b->ref_count > 1)
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}
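/* Rebuilding the chain from vlib_buffer fields matters when the input node
 * is not dpdk-input, or when the chain was rewritten after reception (for
 * example with TSO): in both cases the mbuf lengths are stale and the PMD
 * would otherwise read the wrong amount of data. */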
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue, n_elts;
  cryptodev_key_t *key = 0;
  u32 last_key_index = ~0;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      if (fe->key_index != last_key_index)
	{
	  /* ... look up the key, (re)create the session if needed ... */
	  key = pool_elt_at_index (cmt->keys, fe->key_index);
	  last_key_index = fe->key_index;
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      /* mbuf data is addressed relative to integ_offset, so the crypto
       * offset becomes the difference between the two */
      crypto_offset = offset_diff;
      sop->session = key->keys[op_type];
      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      /* ... digest phys addr, mbuf data_off adjustment elided ... */
      if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	/* the mbuf may have been modified earlier as part of a chain;
	 * reset nb_segs so a stale chain is not reused */
	sop->m_src->nb_segs = 1;

      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
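/* Frame-level bookkeeping rides in the first op of the burst: cop[0]
 * records the frame pointer and element count, which is what the dequeue
 * side later uses to recover a whole frame from the completion ring. */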
static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t *vm,
			     vnet_crypto_async_frame_t *frame,
			     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue = 0, n_elts;
  cryptodev_key_t *key = 0;
  u32 last_key_index = ~0;
  u8 sess_aad_len;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;

      if (fe->key_index != last_key_index)
	{
	  key = pool_elt_at_index (cmt->keys, fe->key_index);
	  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
	  /* ... recreate the session if sess_aad_len != aad_len ... */
	  last_key_index = fe->key_index;
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->session = key->keys[op_type];
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      /* ... digest phys addr, mbuf data_off adjustment elided ... */
      if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	sop->m_src->nb_segs = 1;

      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
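/* Unlike the linked-alg path, GCM keeps its IV and AAD scratch space
 * inside cryptodev_op_t (CRYPTODEV_IV_OFFSET / CRYPTODEV_AAD_OFFSET), so
 * the PMD can address both at a fixed offset from op.phys_addr. */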
static_always_inline cryptodev_op_t *
cryptodev_get_ring_head (struct rte_ring *ring)
{
  cryptodev_op_t **r = (void *) &ring[1];
  return r[ring->cons.head & ring->mask];
}
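/* This peeks at the oldest not-yet-dequeued element by indexing directly
 * into the storage that follows the rte_ring header; it lets the dequeue
 * path inspect cop[0]->n_elts without consuming the entry. */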
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			 u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_op_t *cop0, **cop = cet->cops;
  vnet_crypto_async_frame_elt_t *fe;
  vnet_crypto_async_frame_t *frame;
  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0;	/* running status OR */

  if (cet->inflight > 0)
    {
      n_elts = clib_min (CRYPTODEV_NB_CRYPTO_OPS - n_completed_ops,
			 VNET_CRYPTO_FRAME_SIZE);
      n_elts = rte_cryptodev_dequeue_burst
	(cet->cryptodev_id, cet->cryptodev_q,
	 (struct rte_crypto_op **) cet->cops, n_elts);
      cet->inflight -= n_elts;
      n_completed_ops += n_elts;

      rte_ring_sp_enqueue_burst (cet->ring, (void *) cet->cops,
				 n_elts, NULL);
    }

  if (PREDICT_FALSE (n_completed_ops == 0))
    return 0;

  cop0 = cryptodev_get_ring_head (cet->ring);
  /* not a single frame is finished yet */
  if (PREDICT_FALSE (cop0->n_elts > rte_ring_count (cet->ring)))
    return 0;

  frame = cop0->frame;
  n_elts = cop0->n_elts;
  n_elts = rte_ring_sc_dequeue_bulk (cet->ring, (void **) cet->cops,
				     n_elts, 0);
  fe = frame->elts;

  while (n_elts > 4)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];

      cop += 4;
      fe += 4;
      n_elts -= 4;
    }

  while (n_elts--)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe++;
      cop++;
    }

  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
    VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  rte_mempool_put_bulk (numa->cop_pool, (void **) cet->cops, frame->n_elts);
  *nb_elts_processed = frame->n_elts;
  *enqueue_thread_idx = frame->enqueue_thread_index;
  return frame;
}
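/* Status folding: every element's status is ORed into ss0..ss3, so after
 * the loops a single compare against VNET_CRYPTO_OP_STATUS_COMPLETED tells
 * whether the whole frame succeeded or needs per-element inspection. */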
/* ... cryptodev_enqueue_gcm_aad_8_enc () and the other per-algorithm
 * wrappers instantiating the two enqueue templates above are elided ... */

/**
 * assign a cryptodev resource to a worker.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t *cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;

  /* ... the auto-assign op picks the first free instance via
   * clib_bitmap_first_clear (); an explicit assignment first verifies the
   * requested instance is free; either way the instance is then claimed: */
  clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
			    cryptodev_inst_index, 1);
  /* ... record dev_id / queue in cet, release any previous instance ... */
}
static u8 *
format_cryptodev_inst (u8 *s, va_list *args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);
  if (clib_bitmap_get (cmt->active_cdev_inst_mask, inst))
    {
      /* ... find the worker thread the instance is assigned to ... */
      s = format (s, "%u (%v)\n", thread_index,
		  vlib_worker_threads[thread_index].name);
    }
  else
    s = format (s, "%s\n", "free");

  return s;
}
static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			      vlib_cli_command_t *cmd)
{
  /* ... iterate the instances, vlib_cli_output () each one through
   * format_cryptodev_inst () ... */
  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			     vlib_cli_command_t *cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;

  /* ... */
  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	return clib_error_return (0, "unknown input `%U'",
				  format_unformat_error, line_input);
    }

  if (!thread_present || !inst_present)
    return clib_error_return (0, "mandatory argument(s) missing");

  /* ... validate the thread, call cryptodev_assign_resource () ... */
  return error;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
		  "resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
static int
check_cryptodev_alg_support (u32 dev_id)
{
  const struct rte_cryptodev_symmetric_capability *cap;
  struct rte_cryptodev_sym_capability_idx cap_idx;

#define _(a, b, c, d, e, f) \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b; \
  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_##b##_##c; \
  else \
    { \
      if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
	return -RTE_CRYPTO_##b##_##c; \
      if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f) \
	return -RTE_CRYPTO_##b##_##c; \
      if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d) \
	return -RTE_CRYPTO_##b##_##c; \
    }

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; \
  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_CIPHER_##b; \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; \
  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_AUTH_##c;

  foreach_cryptodev_link_async_alg
#undef _

  return 0;
}

static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  u32 i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      if (rte_cryptodev_socket_id (i) != numa)
	{
	  clib_warning ("DPDK crypto resource %s is in different numa node "
			"as %u, ignored", info.device->name, numa);
	  continue;
	}
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}
static int
cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
{
  struct rte_cryptodev_info info;
  struct rte_cryptodev *cdev;
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
						       vm->numa_node);
  u32 i;
  int ret;

  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
  rte_cryptodev_info_get (cryptodev_id, &info);

  ret = check_cryptodev_alg_support (cryptodev_id);
  if (ret != 0)
    return ret;

  /* reuse the device if it is already started, otherwise configure both
   * the device and its queue pairs */
  if (!cdev->data->dev_started)
    {
      struct rte_cryptodev_config cfg;

      cfg.socket_id = vm->numa_node;
      cfg.nb_queue_pairs = info.max_nb_queue_pairs;

      rte_cryptodev_configure (cryptodev_id, &cfg);

      for (i = 0; i < info.max_nb_queue_pairs; i++)
	{
	  struct rte_cryptodev_qp_conf qp_cfg;

	  qp_cfg.mp_session = numa_data->sess_pool;
	  qp_cfg.mp_session_private = numa_data->sess_priv_pool;
	  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

	  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
						vm->numa_node);
	  if (ret)
	    break;
	}
      if (i != info.max_nb_queue_pairs)
	return -1;

      /* start the device */
      rte_cryptodev_start (cryptodev_id);
    }

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
		"%s_q%u", info.device->name, i);
    }

  return 0;
}
static int
cryptodev_create_device (vlib_main_t *vm, u32 n_queues)
{
  char name[RTE_CRYPTODEV_NAME_MAX_LEN], args[128];
  u32 dev_id = 0;
  int ret;

  /* find an unused name to create the device */
  while (dev_id < RTE_CRYPTO_MAX_DEVS)
    {
      snprintf (name, RTE_CRYPTODEV_NAME_MAX_LEN - 1, "%s%u",
		RTE_STR (CRYPTODEV_DEF_DRIVE), dev_id);
      if (rte_cryptodev_get_dev_id (name) < 0)
	break;
      dev_id++;
    }

  if (dev_id == RTE_CRYPTO_MAX_DEVS)
    return -1;

  snprintf (args, 127, "socket_id=%u,max_nb_queue_pairs=%u",
	    vm->numa_node, n_queues);

  ret = rte_vdev_init (name, args);
  if (ret < 0)
    return ret;

  clib_warning ("Created cryptodev device %s (%s)", name, args);

  return 0;
}
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;
  int ret;

  /* not enough queue pairs for all workers: create a vdev to make up the
   * difference */
  if (n_queues < n_workers)
    {
      ret = cryptodev_create_device (vm, max_pow2 (n_workers - n_queues));
      if (ret < 0)
	return ret;
    }

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      ret = cryptodev_configure (vm, i);
      if (ret)
	return ret;
    }

  return 0;
}
static int
cryptodev_get_session_sz (vlib_main_t *vm, uint32_t n_workers)
{
  u32 sess_data_sz = 0, i;
  int ret;

  if (rte_cryptodev_count () == 0)
    {
      clib_warning ("No cryptodev device available, creating...");
      ret = cryptodev_create_device (vm, max_pow2 (n_workers));
      if (ret < 0)
	return ret;
    }

  /* the session mempool element must fit the largest private session
   * size of any device */
  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);

      sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
    }

  return sess_data_sz;
}
static void
dpdk_disable_cryptodev_engine (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;

  /* ... */
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);
  if (numa_data->cop_pool)
    rte_mempool_free (numa_data->cop_pool);
}
static void
crypto_op_init (struct rte_mempool *mempool,
		void *_arg __attribute__ ((unused)),
		void *_obj, unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}
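/* Run once per element at pool-creation time, so the constant fields and
 * each op's IOVA never have to be re-initialized on the datapath. */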
clib_error_t *
dpdk_cryptodev_init (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 numa = vm->numa_node;
  u32 sess_sz, n_cop_elts;
  struct rte_crypto_op_pool_private *priv;
  u8 *name = 0;
  u32 eidx;
  /* ... worker-count checks; sess_sz via cryptodev_get_session_sz (),
   * n_cop_elts sized from the worker count, cryptodev_probe () called;
   * error paths elided ... */
  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

  /* create session pool for the numa node */
  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
  mp = rte_cryptodev_sym_session_pool_create ((char *) name,
					      CRYPTODEV_NB_SESSION,
					      0, 0, 0, numa);
  /* ... */
  numa_data->sess_pool = mp;
  vec_free (name);

  /* create session private pool sized for the largest device session */
  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz,
			   0, 0, NULL, NULL, NULL, NULL, numa, 0);
  /* ... */
  numa_data->sess_priv_pool = mp;
  vec_free (name);

  /* create the crypto op pool; crypto_op_init () pre-fills each element */
  name = format (0, "cryptodev_op_pool_%u%c", numa, 0);
  mp = rte_mempool_create ((char *) name, n_cop_elts,
			   sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2,
			   sizeof (struct rte_crypto_op_pool_private),
			   NULL, NULL, crypto_op_init, NULL, numa, 0);
  /* ... */
  priv = rte_mempool_get_priv (mp);
  priv->priv_size = sizeof (struct rte_crypto_op_pool_private);
  priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  numa_data->cop_pool = mp;
  vec_free (name);
  /* ... per worker thread: each gets a single-producer/single-consumer
   * completion ring ... */
  name = format (0, "frames_ring_%u%c", i, 0);
  ptd->ring = rte_ring_create ((char *) name, CRYPTODEV_NB_CRYPTO_OPS,
			       vm->numa_node,
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
  /* ... */
  vec_free (name);

  /* register the engine and its enqueue/dequeue handlers */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
				      "DPDK Cryptodev Engine");

#define _(a, b, c, d, e, f) \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \
     cryptodev_enqueue_gcm_aad_##f##_enc, \
     cryptodev_frame_dequeue); \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \
     cryptodev_enqueue_gcm_aad_##f##_dec, \
     cryptodev_frame_dequeue);

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC, \
     cryptodev_enqueue_linked_alg_enc, \
     cryptodev_frame_dequeue); \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC, \
     cryptodev_enqueue_linked_alg_dec, \
     cryptodev_frame_dequeue);

  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  return 0;
}