#include <vpp/app/version.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_MAX_INFLIGHT  (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_AAD_MASK      (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_DEQ_CACHE_SZ  32
#define CRYPTODEV_NB_SESSION    10240
#define CRYPTODEV_MAX_AAD_SIZE  16
#define CRYPTODEV_MAX_N_SGL     8 /**< maximum number of segments */

#define foreach_vnet_aead_crypto_conversion  \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)

/**
 * crypto (alg, cryptodev_alg), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg \
  _ (AES_128_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_192_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_256_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_128_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_192_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_256_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_128_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_192_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_256_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_128_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_192_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_256_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_128_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_192_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_256_CBC, AES_CBC, SHA512, 32)

/* ... */

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_mempool *sess_pool;
  struct rte_mempool *sess_priv_pool;
} cryptodev_numa_data_t;

/* ... */

typedef struct
{
  /* ... */
  enum rte_iova_mode iova_mode;
  /* ... */
} cryptodev_main_t;

cryptodev_main_t cryptodev_main;
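/* Sizing note: CRYPTODEV_NB_CRYPTO_OPS is a power of two, so both
 * CRYPTODEV_MAX_INFLIGHT and CRYPTODEV_AAD_MASK (each NB_CRYPTO_OPS - 1)
 * can serve as the inflight bound and as the wrap-around mask for the
 * per-thread AAD staging slots, e.g. (illustrative, not quoted code):
 * slot_off = (aad_index++ & CRYPTODEV_AAD_MASK) * CRYPTODEV_MAX_AAD_SIZE. */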
static int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type,
		    const vnet_crypto_key_t *key, u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = 0;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}
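/* The fixed values above follow the IPsec ESP GCM conventions (RFC 4106):
 * a 12-byte nonce (4-byte salt + 8-byte explicit IV) and a 16-byte ICV;
 * only the AAD length differs per SA (8 bytes, or 12 with ESN enabled),
 * which is why aad_len is a parameter and is later cached in the
 * session's opaque_data. */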
static int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d)                       \
  case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:  \
    cipher_algo = RTE_CRYPTO_CIPHER_##b;    \
    auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
    digest_len = d;                         \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = 0;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
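/* The xform chain order encodes encrypt-then-MAC: for encryption the
 * CIPHER xform occupies xforms[0] and AUTH follows it, while for
 * decryption the order is reversed so the HMAC is verified before the
 * payload is decrypted; that is why the two branches above swap which
 * slot of xforms[] each transform uses. */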
static int
cryptodev_session_create (vnet_crypto_key_t *const key,
			  struct rte_mempool *sess_priv_pool,
			  cryptodev_key_t *session_pair, u32 aad_len)
{
  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
  struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev *cdev;
  int ret;
  uint8_t dev_id = 0;

  /* ... fill xforms_enc / xforms_dec with prepare_aead_xform() or
     prepare_linked_xform(), depending on key->type ... */

  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      dev_id = dev_inst->dev_id;
      cdev = rte_cryptodev_pmd_get_dev (dev_id);

      /* the session pair only needs to be initialized once per driver
	 type; skip devices whose driver already holds session data */
      if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
	  session_pair->keys[1]->sess_data[cdev->driver_id].data)
	continue;

      ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
					    xforms_enc, sess_priv_pool);
      if (ret < 0)
	return ret;
      ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
					    xforms_dec, sess_priv_pool);
      if (ret < 0)
	return ret;
    }

  session_pair->keys[0]->opaque_data = aad_len;
  session_pair->keys[1]->opaque_data = aad_len;

  return 0;
}
static void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}
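/* DPDK requires a symmetric session to be cleared on every device on
 * which it was initialized before it may be returned to its mempool;
 * clearing it on all rte_cryptodev_count() devices, as above, is the
 * conservative way to satisfy that contract. */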
static int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  vnet_crypto_async_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->async_alg;
#define _(a, b, c, d, e, f)       \
  if (alg == VNET_CRYPTO_ALG_##a) \
    return 0;
  foreach_vnet_aead_crypto_conversion
#undef _
  return -1;
}

static_always_inline void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = 0;
  int ret = 0;

  /* ... on key addition: allocate the encrypt/decrypt session pair from
     the numa-local session mempool ... */
  ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
  /* ... */
  ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
  /* ... */

  /* ... on key deletion (or on allocation failure): clear and free both
     sessions, then wipe the pair ... */
  memset (ckey, 0, sizeof (*ckey));
  /* ... */
}
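/* Sketch of the key lifecycle handled above, assuming the usual
 * vnet_crypto key ops: ADD allocates the encrypt/decrypt session pair
 * from the numa-local pool, DEL clears and frees both sessions (see
 * cryptodev_session_del) and wipes the pair, and a key modification is
 * handled as delete-then-recreate. Per-device session initialization
 * itself is performed by cryptodev_session_create(). */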
static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t *f,
				 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
  f->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
}
static_always_inline int
cryptodev_frame_build_sgl (vlib_main_t *vm, enum rte_iova_mode iova_mode,
			   struct rte_crypto_vec *data_vec, u16 *n_seg,
			   vlib_buffer_t *b, u32 size)
{
  struct rte_crypto_vec *vec = data_vec + 1;
  /* ... bail out if the chain has more than CRYPTODEV_MAX_N_SGL
     segments ... */

  while ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && size)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      vec->base = (void *) vlib_buffer_get_current (b);
      if (iova_mode == RTE_IOVA_VA)
	vec->iova = pointer_to_uword (vec->base);
      else
	vec->iova = vlib_buffer_get_current_pa (vm, b);
      /* ... set vec->len, decrement size, advance vec, bump *n_seg ... */
    }

  /* ... */
  return 0;
}
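/* The iova_mode branch is the point of this helper: under RTE_IOVA_VA
 * the device operates on process virtual addresses, so
 * pointer_to_uword() is enough; under RTE_IOVA_PA every segment's
 * virtual address must first be translated to a physical one with the
 * buffer/physmem helpers before it can be handed to DMA. */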
static_always_inline u64
compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t *fe, i16 *min_ofs,
			u32 *max_end)
{
  union rte_crypto_sym_ofs ofs;
  u32 crypto_end = fe->crypto_start_offset + fe->crypto_total_length;
  u32 integ_end = fe->integ_start_offset + fe->crypto_total_length +
    fe->integ_length_adj;

  *min_ofs = clib_min (fe->crypto_start_offset, fe->integ_start_offset);
  *max_end = clib_max (crypto_end, integ_end);

  ofs.ofs.cipher.head = fe->crypto_start_offset - *min_ofs;
  ofs.ofs.cipher.tail = *max_end - crypto_end;
  ofs.ofs.auth.head = fe->integ_start_offset - *min_ofs;
  ofs.ofs.auth.tail = *max_end - integ_end;

  return ofs.raw;
}
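/* Worked example (hypothetical offsets): crypto_start_offset = 16,
 * crypto_total_length = 100, integ_start_offset = 0 and
 * integ_length_adj = 16 give crypto_end = 116 and integ_end = 116, so
 * min_ofs = 0 and max_end = 116: the device is handed one region
 * [0, 116) with cipher head/tail = 16/0 and auth head/tail = 0/0. */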
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  struct rte_crypto_vec *vec;
  struct rte_crypto_data iv_vec, digest_vec;
  vlib_buffer_t **b;
  cryptodev_key_t *key;
  union rte_crypto_sym_ofs cofs;
  i16 min_ofs;
  u32 max_end;
  u16 n_seg = 1;
  int status;
  /* ... */

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < frame->n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vec = cet->vec;

  /* ... per-element loop: whenever the element's key differs from the
     previous one, bind its session to this queue pair's data-plane
     service context, creating the session pair on first use; the
     remaining arguments of the configure call are elided here, and the
     already-created-session path issues the same call ... */
  if (PREDICT_FALSE
      (rte_cryptodev_dp_configure_service
       (/* ... */ RTE_CRYPTO_OP_WITH_SESSION,
	(union rte_cryptodev_session_ctx) key->keys[op_type],
	cet->dp_service, /* ... */) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);

  vec->len = max_end - min_ofs;
  if (cmt->iova_mode == RTE_IOVA_VA)
    {
      vec->base = (void *) (b[0]->data + min_ofs);
      vec->iova = pointer_to_uword (b[0]->data) + min_ofs;
      iv_vec.base = (void *) fe->iv;
      iv_vec.iova = pointer_to_uword (fe->iv);
      digest_vec.base = (void *) fe->tag;
      digest_vec.iova = pointer_to_uword (fe->tag);
    }
  else
    {
      vec->base = (void *) (b[0]->data + min_ofs);
      vec->iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
      iv_vec.base = (void *) fe->iv;
      iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
      digest_vec.base = (void *) fe->tag;
      digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
    }

  if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
    {
      /* the first segment can only cover what lives in the head buffer */
      vec->len = b[0]->current_data + b[0]->current_length - min_ofs;
      if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
				     max_end - min_ofs - vec->len) < 0)
	{
	  cryptodev_mark_frame_err_status
	    (frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
	  return -1;
	}
    }

  status = rte_cryptodev_dp_submit_single_job (cet->dp_service,
					       vec, n_seg, cofs, &iv_vec,
					       &digest_vec, /* ... */);
  if (status < 0)
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }
  /* ... advance fe / b and loop; the burst is committed once per frame,
     see the end of the GCM path below ... */
}
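/* The data-plane service API is two-phase: submit_single_job only stages
 * a job on the queue pair, and the device sees nothing until
 * rte_cryptodev_dp_submit_done() commits the whole burst (issued once
 * per frame, see the end of the GCM path below); the intent is to
 * amortize the queue kick across all elements of a frame. */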
static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t *vm,
			     vnet_crypto_async_frame_t *frame,
			     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  union rte_crypto_sym_ofs cofs;
  struct rte_crypto_vec *vec;
  struct rte_crypto_data iv_vec, digest_vec, aad_vec;
  cryptodev_key_t *key;
  u32 aad_offset; /* slot offset into cet->aad_buf; computation elided */
  u8 sess_aad_len;
  u16 n_seg = 1;
  int status;
  /* ... */

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < frame->n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vec = cet->vec;

  /* ... per-element loop; when the element's key changes: the AAD length
     the session pair was created with is cached in its opaque_data, so a
     mismatch (e.g. ESN turned on, 8 -> 12 bytes) forces the pair to be
     recreated before the session is bound to the service context ... */
  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
  /* ... */
  if (PREDICT_FALSE
      (rte_cryptodev_dp_configure_service
       (/* ... */ RTE_CRYPTO_OP_WITH_SESSION,
	(union rte_cryptodev_session_ctx) key->keys[op_type],
	cet->dp_service, /* ... */) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }
  /* ... the already-configured-session path performs the same
     opaque_data check and configure call ... */

  if (cmt->iova_mode == RTE_IOVA_VA)
    {
      /* ... vec->base / vec->iova setup for the head buffer ... */
      iv_vec.base = (void *) fe->iv;
      digest_vec.base = (void *) fe->tag;
      aad_vec.base = (void *) (cet->aad_buf + aad_offset);
      /* ... matching iovas via pointer_to_uword() ... */
    }
  else
    {
      /* ... */
      iv_vec.base = (void *) fe->iv;
      aad_vec.base = (void *) (cet->aad_buf + aad_offset);
      digest_vec.base = (void *) fe->tag;
      /* ... matching iovas via the physmem helpers ... */
    }

  if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
    {
      /* ... as in the linked-alg path ... */
      if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
				     /* ... */) < 0)
	{
	  cryptodev_mark_frame_err_status
	    (frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
	  return -1;
	}
    }

  status =
    rte_cryptodev_dp_submit_single_job (cet->dp_service, vec, n_seg, cofs,
					&iv_vec, &digest_vec, &aad_vec,
					/* ... */);
  if (PREDICT_FALSE (status < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }
  /* ... end of per-element loop ... */

  /* commit the staged jobs to the device and account for them */
  rte_cryptodev_dp_submit_done (cet->dp_service, frame->n_elts);
  cet->inflight += frame->n_elts;

  return 0;
}
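/* Inferred from the constants and cet->aad_buf (the copy itself lives in
 * elided code): GCM AAD must sit at a stable, DMA-able address for the
 * lifetime of the job, so each element's AAD bytes are staged into a
 * per-thread buffer carved into CRYPTODEV_MAX_AAD_SIZE-byte slots whose
 * indices wrap with CRYPTODEV_AAD_MASK; aad_offset above is the
 * element's slot offset within that buffer. */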
static u32
cryptodev_get_frame_n_elts (void *frame)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;
  return f->n_elts;
}

static void
cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;

  f->elts[index].status = is_op_success ? VNET_CRYPTO_OP_STATUS_COMPLETED :
    VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
}
#define GET_RING_OBJ(r, pos, f)                          \
  do                                                     \
    {                                                    \
      vnet_crypto_async_frame_t **ring = (void *) &r[1]; \
      f = ring[(r->cons.head + pos) & r->mask];          \
    }                                                    \
  while (0)

static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			 u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame, *frame_ret = 0;
  u32 n_deq, n_success;
  u32 n_cached_frame = rte_ring_count (cet->cached_frame), i;
  u8 no_job_to_deq = 0;
  u32 inflight = cet->inflight;
  u32 n_room_left = CRYPTODEV_DEQ_CACHE_SZ - n_cached_frame - 1;
  /* ... */

  /* first, try to complete any partially dequeued frames cached on the
     ring, oldest first, to preserve frame ordering */
  for (i = 0; i < n_cached_frame; i++)
    {
      vnet_crypto_async_frame_t *f;
      u32 n_left, j;
      u8 err;

      GET_RING_OBJ (cet->cached_frame, i, f);

      if (i < n_cached_frame - 2)
	{
	  /* ... prefetch the frames two slots ahead ... */
	}

      /* a cached frame packs its remaining-element count in the low
	 7 bits of state and an error flag in bit 7 */
      n_left = f->state & 0x7f;
      err = f->state & 0x80;

      for (j = f->n_elts - n_left; j < f->n_elts && inflight; j++)
	{
	  int ret;
	  /* ... */
	  ret = rte_cryptodev_dp_sym_dequeue_single_job (cet->dp_service,
							 /* ... */);
	  f->elts[j].status = ret == 1 ? VNET_CRYPTO_OP_STATUS_COMPLETED :
	    VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
	  /* ... fold the result into err, decrement inflight ... */
	}
      /* ... finalize f->state once all elements are back; stop scanning
	 when elements remain pending ... */
    }

  /* pop the frames completed above off the cache ring */
  rte_ring_sc_dequeue (cet->cached_frame, (void **) &frame_ret);
  /* ... */

  if (!inflight || no_job_to_deq || !n_room_left)
    goto end_deq;

  n_deq = rte_cryptodev_dp_sym_dequeue (cet->dp_service,
					cryptodev_get_frame_n_elts,
					cryptodev_post_dequeue,
					(void **) &frame, 0, &n_success);
  if (n_deq)
    {
      inflight -= n_deq;
      no_job_to_deq = n_deq < frame->n_elts;
      /* a frame that is incomplete, or that must queue behind frames
	 already cached, goes onto the cache ring to preserve ordering */
      if (frame_ret || n_cached_frame || no_job_to_deq)
	{
	  frame->state = frame->n_elts - n_deq;
	  frame->state |= ((n_success < n_deq) << 7);
	  rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
	  n_room_left--;
	}
      else
	{
	  frame->state = n_success == frame->n_elts ?
	    VNET_CRYPTO_FRAME_STATE_SUCCESS :
	    VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
	  frame_ret = frame;
	}
    }

  /* keep draining whole frames while room remains on the cache ring */
  while (inflight && n_room_left && !no_job_to_deq)
    {
      n_deq = rte_cryptodev_dp_sym_dequeue (cet->dp_service,
					    cryptodev_get_frame_n_elts,
					    cryptodev_post_dequeue,
					    (void **) &frame, 0, &n_success);
      if (!n_deq)
	break;
      inflight -= n_deq;
      no_job_to_deq = n_deq < frame->n_elts;
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }

end_deq:
  /* acknowledge everything dequeued in this call in one shot */
  if (inflight < cet->inflight)
    {
      rte_cryptodev_dp_dequeue_done (cet->dp_service,
				     cet->inflight - inflight);
      cet->inflight = inflight;
    }

  if (frame_ret)
    {
      *nb_elts_processed = frame_ret->n_elts;
      *enqueue_thread_idx = frame_ret->enqueue_thread_index;
    }

  return frame_ret;
}
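/* State-encoding example (illustrative numbers): a frame with
 * n_elts = 24 of which n_deq = 20 jobs came back, all successful, is
 * cached with state = 24 - 20 = 4; had any of them failed, bit 7 would
 * also be set (state = 0x84). The cache-scanning loop above decodes
 * this as n_left = state & 0x7f and err = state & 0x80. */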
/* ... the per-direction enqueue wrappers (cryptodev_enqueue_gcm_aad_8_enc
   and friends) that bind the two enqueue paths above to the vnet async
   op ids are elided here ... */

/**
 * assign a cryptodev resource to a worker.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t *cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assigning a resource is only allowed when no job is inflight */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      /* ... pick the first free instance from active_cdev_inst_mask,
	 under the main structure's spinlock ... */
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->dp_service = (struct rte_crypto_dp_service_ctx *)
	cinst->raw_ctx; /* 'raw_ctx' is a placeholder name; the actual
			   right-hand side is elided in the fragments */
      /* ... */
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* ... release the previously held instance, then claim the
	 requested one ... */
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
				cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->dp_service = (struct rte_crypto_dp_service_ctx *)
	cinst->raw_ctx; /* placeholder, as above */
      break;
    default:
      return -EINVAL;
    }
  return 0;
}
static u8 *
format_cryptodev_inst (u8 *s, va_list *args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);
  if (clib_bitmap_get (cmt->active_cdev_inst_mask, inst))
    {
      /* ... find the worker thread this instance is assigned to ... */
      s = format (s, "%u (%v)\n", thread_index,
		  vlib_worker_threads[thread_index].name);
    }
  else
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			      vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  /* ... print a header line, then one line per instance ... */
  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  return 0;
}
VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
  .path = "show cryptodev assignment",
  .short_help = "show cryptodev assignment",
  .function = cryptodev_show_assignment_fn,
};
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			     vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;

  /* ... get the line input ... */

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  /* ... bounds-check thread_index / inst_index, then reassign:
     cet = cmt->per_thread_data + thread_index;
     cryptodev_assign_resource (cet, inst_index,
				CRYPTODEV_RESOURCE_ASSIGN_UPDATE); ... */

  return error;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
  .path = "set cryptodev assignment",
  .short_help = "set cryptodev assignment thread <thread_index> "
		"resource <inst_index>",
  .function = cryptodev_set_assignment_fn,
};
static int
check_cryptodev_alg_support (u32 dev_id)
{
  const struct rte_cryptodev_symmetric_capability *cap;
  struct rte_cryptodev_sym_capability_idx cap_idx;

#define _(a, b, c, d, e, f)                                                  \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b;                                   \
  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c;                                  \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx);                 \
  if (!cap)                                                                  \
    return -RTE_CRYPTO_##b##_##c;                                            \
  if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e)        \
    return -RTE_CRYPTO_##b##_##c;                                            \
  if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f)              \
    return -RTE_CRYPTO_##b##_##c;                                            \
  if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d)                \
    return -RTE_CRYPTO_##b##_##c;

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d)                                                        \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                                \
  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                               \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx);                 \
  if (!cap)                                                                  \
    return -RTE_CRYPTO_CIPHER_##b;                                           \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                                  \
  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC;                            \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx);                 \
  if (!cap)                                                                  \
    return -RTE_CRYPTO_AUTH_##c;

  foreach_cryptodev_link_async_alg
#undef _

  return 0;
}

static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  u32 i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      if (rte_cryptodev_socket_id (i) != numa)
	{
	  clib_warning ("DPDK crypto resource %s is in different numa node "
			"as %u, ignored", info.device->name, numa);
	  continue;
	}
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}
static int
cryptodev_configure (vlib_main_t *vm, u32 cryptodev_id)
{
  struct rte_cryptodev_info info;
  struct rte_cryptodev *cdev;
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
						       vm->numa_node);
  u32 i;
  int ret;

  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
  rte_cryptodev_info_get (cryptodev_id, &info);

  /* this engine only works with PMDs that implement the data-plane
     service API */
  if (!(info.feature_flags & RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE))
    return -1;

  ret = check_cryptodev_alg_support (cryptodev_id);
  if (ret != 0)
    return ret;

  if (!cdev->data->dev_started)
    {
      struct rte_cryptodev_config cfg;

      cfg.socket_id = vm->numa_node;
      cfg.nb_queue_pairs = info.max_nb_queue_pairs;

      rte_cryptodev_configure (cryptodev_id, &cfg);

      for (i = 0; i < info.max_nb_queue_pairs; i++)
	{
	  struct rte_cryptodev_qp_conf qp_cfg;

	  qp_cfg.mp_session = numa_data->sess_pool;
	  qp_cfg.mp_session_private = numa_data->sess_priv_pool;
	  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

	  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
						vm->numa_node);
	  if (ret)
	    break;
	}
      if (i != info.max_nb_queue_pairs)
	return -1;

      /* start the device */
      rte_cryptodev_start (cryptodev_id);
    }

  ret = rte_cryptodev_get_dp_service_ctx_data_size (cryptodev_id);
  if (ret < 0)
    return -1;
  /* ... remember the per-queue service context size ... */

  for (i = 0; i < info.max_nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
		"%s_q%u", info.device->name, i);
    }

  return 0;
}
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;

  /* bail out if there are not enough queue pairs for all workers */
  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    cryptodev_configure (vm, i);

  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  return 0;
}
static int
cryptodev_get_session_sz (vlib_main_t *vm, u32 n_workers)
{
  u32 sess_data_sz = 0, i;

  if (rte_cryptodev_count () == 0)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);

      sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
    }

  return sess_data_sz;
}
static void
dpdk_disable_cryptodev_engine (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;

  /* ... */
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);
}
clib_error_t *
dpdk_cryptodev_init (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *ptd;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 numa = vm->numa_node;
  i32 sess_sz;
  u32 eidx;
  u32 i;
  u8 *name = 0;

  cmt->iova_mode = rte_eal_iova_mode ();

  sess_sz = cryptodev_get_session_sz (vm, n_workers);
  /* ... */

  /* probe the devices on this numa node and carve out queue-pair
     instances */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      /* ... */
    }

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

  /* create the session pool for this numa node */
  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
  mp = rte_cryptodev_sym_session_pool_create ((char *) name,
					      CRYPTODEV_NB_SESSION, 0, 0, 0,
					      numa);
  if (!mp)
    {
      /* ... */
    }
  numa_data->sess_pool = mp;
  vec_free (name);

  /* create the session private-data pool for this numa node */
  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
			   0, NULL, NULL, NULL, NULL, numa, 0);
  if (!mp)
    {
      /* ... */
    }
  numa_data->sess_priv_pool = mp;
  vec_free (name);

  /* per-thread data, including the ring that caches partially dequeued
     frames */
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      ptd = cmt->per_thread_data + i;
      name = format (0, "cache_frame_ring_%u%u", numa, i);
      ptd->cached_frame = rte_ring_create ((char *) name,
					   CRYPTODEV_DEQ_CACHE_SZ, numa,
					   RING_F_SC_DEQ | RING_F_SP_ENQ);
      /* ... */
      vec_free (name);
    }

  /* register the engine and its async algorithm handlers */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
				      "DPDK Cryptodev Engine");

#define _(a, b, c, d, e, f)                                             \
  vnet_crypto_register_async_handler                                    \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,              \
     cryptodev_enqueue_gcm_aad_##f##_enc, cryptodev_frame_dequeue);     \
  vnet_crypto_register_async_handler                                    \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,              \
     cryptodev_enqueue_gcm_aad_##f##_dec, cryptodev_frame_dequeue);

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d)                                                   \
  vnet_crypto_register_async_handler                                    \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC,                 \
     cryptodev_enqueue_linked_alg_enc, cryptodev_frame_dequeue);        \
  vnet_crypto_register_async_handler                                    \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC,                 \
     cryptodev_enqueue_linked_alg_dec, cryptodev_frame_dequeue);

  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  return 0;
}
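/* For illustration, the AEAD registration macro above expands, for the
 * (AES_128_GCM, ..., 16, 8) entry of foreach_vnet_aead_crypto_conversion,
 * to:
 *
 *   vnet_crypto_register_async_handler
 *     (vm, eidx, VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD8_ENC,
 *      cryptodev_enqueue_gcm_aad_8_enc, cryptodev_frame_dequeue);
 *
 * (and likewise for _DEC): every async op shares the single dequeue
 * handler, while the enqueue handler is specialized per direction and
 * AAD length. */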