#include <vpp/app/version.h>

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_NB_SESSION    10240
#define CRYPTODEV_DEF_DRIVE     crypto_aesni_mb

#define CRYPTODEV_IV_OFFSET  (offsetof (cryptodev_op_t, iv))
#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))

/* VNET_CRYPTO_ALG, XFORM_TYPE, DPDK_ALG, IV_LEN, TAG_LEN, AAD_LEN */
#define foreach_vnet_aead_crypto_conversion \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)

/* crypto (alg, cryptodev_alg), hash (alg, digest-size) */
#define foreach_cryptodev_link_async_alg \
  _ (AES_128_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_192_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_256_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_128_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_192_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_256_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_128_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_192_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_256_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_128_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_192_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_256_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_128_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_192_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_256_CBC, AES_CBC, SHA512, 32)

/* map DPDK rte_crypto_op_status onto vnet crypto op status */
#define foreach_vnet_crypto_status_conversion \
  _(SUCCESS, COMPLETED)                       \
  _(NOT_PROCESSED, WORK_IN_PROGRESS)          \
  _(AUTH_FAILED, FAIL_BAD_HMAC)               \
  _(INVALID_SESSION, FAIL_ENGINE_ERR)         \
  _(INVALID_ARGS, FAIL_ENGINE_ERR)            \
  _(ERROR, FAIL_ENGINE_ERR)

static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_crypto_op op;
  struct rte_crypto_sym_op sop;
  u8 iv[16];
  u8 aad[16];
  vnet_crypto_async_frame_t *frame;
  u32 n_elts;
} cryptodev_op_t;
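/* For illustration (expansion of the `_' macro above): the
 * cryptodev_status_conversion[] table resolves, in DPDK's
 * rte_crypto_op_status declaration order, to:
 *
 *   [RTE_CRYPTO_OP_STATUS_SUCCESS]         = VNET_CRYPTO_OP_STATUS_COMPLETED,
 *   [RTE_CRYPTO_OP_STATUS_NOT_PROCESSED]   = VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS,
 *   [RTE_CRYPTO_OP_STATUS_AUTH_FAILED]     = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC,
 *   [RTE_CRYPTO_OP_STATUS_INVALID_SESSION] = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
 *   [RTE_CRYPTO_OP_STATUS_INVALID_ARGS]    = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
 *   [RTE_CRYPTO_OP_STATUS_ERROR]           = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR
 *
 * i.e. a plain array lookup converts a DPDK op status into a vnet one, as
 * done in cryptodev_frame_dequeue() below. */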
typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  enum rte_iova_mode iova_mode;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
} cryptodev_main_t;

cryptodev_main_t cryptodev_main;
static int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type,
		    const vnet_crypto_key_t *key, u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}
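/* Worked example (illustrative, matching the assignments above): for an
 * AES-128-GCM key with aad_len = 8 and op_type = CRYPTODEV_OP_TYPE_ENCRYPT,
 * prepare_aead_xform() leaves the xform as:
 *
 *   xform->type                 = RTE_CRYPTO_SYM_XFORM_AEAD;
 *   xform->aead.algo            = RTE_CRYPTO_AEAD_AES_GCM;
 *   xform->aead.op              = RTE_CRYPTO_AEAD_OP_ENCRYPT;
 *   xform->aead.aad_length      = 8;
 *   xform->aead.digest_length   = 16;    // the GCM tag
 *   xform->aead.iv.offset       = CRYPTODEV_IV_OFFSET; // IV lives in the op
 *   xform->aead.iv.length       = 12;
 *   xform->aead.key.data/length = the 16-byte AES key
 */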
static int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      /* encrypt first, then generate the digest over the ciphertext */
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      /* verify the digest first, then decrypt */
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d)                         \
    case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:  \
      cipher_algo = RTE_CRYPTO_CIPHER_##b;    \
      auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
      digest_len = d;                         \
      break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
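/* Illustration: for the table entry _ (AES_128_CBC, AES_CBC, SHA1, 12) the
 * switch case above expands to:
 *
 *   case VNET_CRYPTO_ALG_AES_128_CBC_SHA1_TAG12:
 *     cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC;
 *     auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
 *     digest_len = 12;
 *     break;
 */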
static int
cryptodev_session_create (vnet_crypto_key_t *const key,
			  struct rte_mempool *sess_priv_pool,
			  cryptodev_key_t *session_pair, u32 aad_len)
{
  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
  struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev *cdev;
  int ret;
  uint8_t dev_id = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
			      aad_len);
  if (ret)
    return 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
  {
    dev_id = dev_inst->dev_id;
    cdev = rte_cryptodev_pmd_get_dev (dev_id);

    /* if the session is already configured for this driver type, skip it to
       avoid bumping the session data's refcnt a second time */
    if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
	session_pair->keys[1]->sess_data[cdev->driver_id].data)
      continue;

    ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
					  xforms_enc, sess_priv_pool);
    ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
					  xforms_dec, sess_priv_pool);
    if (ret < 0)
      return ret;
  }
  session_pair->keys[0]->opaque_data = aad_len;
  session_pair->keys[1]->opaque_data = aad_len;

  return 0;
}
static void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  /* clear the per-device private data before freeing the session itself */
  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}
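/* Session lifecycle, as used in this file: allocate with
 * rte_cryptodev_sym_session_create() from the per-numa session pool, bind it
 * to each device's driver with rte_cryptodev_sym_session_init()
 * (cryptodev_session_create() above), and tear down in reverse order here:
 * rte_cryptodev_sym_session_clear() per device, then
 * rte_cryptodev_sym_session_free(). */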
static int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  vnet_crypto_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->alg;

#define _(a, b, c, d, e, f)       \
  if (alg == VNET_CRYPTO_ALG_##a) \
    return 0;

  foreach_vnet_aead_crypto_conversion
#undef _
  return -1;
}

static_always_inline void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = 0;
  int ret = 0;

  /* ... key-op dispatch and per-numa pool lookups elided in this listing ... */

  pool_get_zero (cmt->keys, ckey);
  ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
  if (!ckey->keys[0])
    {
      ret = -1;
      goto clear_key;
    }

  ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
  if (!ckey->keys[1])
    {
      ret = -1;
      goto clear_key;
    }

  ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (ckey->keys[0]);
      cryptodev_session_del (ckey->keys[1]);
      memset (ckey, 0, sizeof (*ckey));
      pool_put (cmt->keys, ckey);
    }
}
static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t *f,
				 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
  f->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
}

static_always_inline rte_iova_t
cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode,
		    void *data)
{
  u64 index;
  if (mode == RTE_IOVA_VA)
    return (rte_iova_t) pointer_to_uword (data);

  index = clib_pmalloc_get_page_index (pm, data);
  return pointer_to_uword (data) - pm->lookup_table[index];
}
static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
			       vlib_buffer_t *b)
{
  struct rte_mbuf *first_mb = mb, *last_mb = mb;

  /* when the input node is not dpdk-input the mbuf data length is not
   * initialized, so recompute it from the vlib_buffer; the chain's segment
   * sizes are then made consistent as a whole */
  u16 data_len = b->current_length +
    (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  first_mb->nb_segs = 1;
  first_mb->pkt_len = first_mb->data_len = data_len;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      first_mb->pkt_len += b->current_length;
      first_mb->nb_segs++;
    }
}
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue, n_elts;
  cryptodev_key_t *key = 0;
  u32 last_key_index = ~0;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (cet->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      if (last_key_index != fe->key_index)
	{
	  key = pool_elt_at_index (cmt->keys, fe->key_index);
	  last_key_index = fe->key_index;
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;
      sop->m_dst = 0;
      /* rebase the offsets so the integ start becomes the mbuf data start;
         the prepend is later rewritten by tx */
      if (PREDICT_TRUE (fe->integ_start_offset < 0))
	{
	  rte_pktmbuf_prepend (sop->m_src, -fe->integ_start_offset);
	  integ_offset = 0;
	  crypto_offset = offset_diff;
	}
      sop->session = key->keys[op_type];
      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      sop->auth.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode,
						       fe->digest);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	/* reset nb_segs so a previously chained mbuf is not mistaken for a
	   chained one here */
	sop->m_src->nb_segs = 1;
      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t *vm,
			     vnet_crypto_async_frame_t *frame,
			     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue = 0, n_elts;
  cryptodev_key_t *key = 0;
  u32 last_key_index = ~0;
  u8 sess_aad_len;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (cet->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      u16 crypto_offset = fe->crypto_start_offset;

      if (last_key_index != fe->key_index)
	{
	  key = pool_elt_at_index (cmt->keys, fe->key_index);
	  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
	  /* the session caches the AAD length it was created with; if the
	     frame's differs, renegotiate the session first */
	  if (PREDICT_FALSE (sess_aad_len != aad_len))
	    cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
				    fe->key_index, aad_len);
	  last_key_index = fe->key_index;
	}

      /* ... mbuf data_off setup elided in this listing ... */

      sop->session = key->keys[op_type];
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      sop->aead.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode,
						       fe->tag);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	sop->m_src->nb_segs = 1;
      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
static_always_inline cryptodev_op_t *
cryptodev_get_ring_head (struct rte_ring *ring)
{
  cryptodev_op_t **r = (void *) &ring[1];
  return r[ring->cons.head & ring->mask];
}
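/* Layout note (holds for the DPDK releases this plugin builds against): the
 * ring's object table is stored immediately after struct rte_ring, so
 * `(void *) &ring[1]' is slot 0 and `cons.head & mask' indexes the next
 * object to be dequeued. This is a peek: the consumer head is not moved.
 * An actual dequeue would advance it, e.g.:
 *
 *   void *obj;
 *   if (rte_ring_dequeue (ring, &obj) == 0)
 *     ;  // obj removed from the ring, cons.head advanced
 */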
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			 u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_op_t **cop = cet->cops;
  vnet_crypto_async_frame_elt_t *fe;
  vnet_crypto_async_frame_t *frame;
  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0;	/* running OR of statuses */

  if (cet->inflight)
    {
      n_elts = clib_min (CRYPTODEV_NB_CRYPTO_OPS - n_completed_ops,
			 VNET_CRYPTO_FRAME_SIZE);
      n_elts = rte_cryptodev_dequeue_burst
	(cet->cryptodev_id, cet->cryptodev_q,
	 (struct rte_crypto_op **) cet->cops, n_elts);
      cet->inflight -= n_elts;
      n_completed_ops += n_elts;

      rte_ring_sp_enqueue_burst (cet->ring, (void *) cet->cops, n_elts,
				 NULL);
    }

  if (PREDICT_FALSE (n_completed_ops == 0))
    return 0;

  /* only hand a frame back once all of its ops have completed */
  cop[0] = cryptodev_get_ring_head (cet->ring);
  if (PREDICT_FALSE (cop[0]->n_elts > rte_ring_count (cet->ring)))
    return 0;

  frame = cop[0]->frame;
  n_elts = cop[0]->n_elts;
  n_elts = rte_ring_sc_dequeue_bulk (cet->ring, (void **) cet->cops,
				     n_elts, 0);
  fe = frame->elts;

  while (n_elts > 4)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];

      cop += 4;
      fe += 4;
      n_elts -= 4;
    }

  while (n_elts)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe++;
      cop++;
      n_elts--;
    }

  /* ORing the converted statuses lets a single compare decide whether every
     element completed successfully */
  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
    VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  rte_mempool_put_bulk (cet->cop_pool, (void **) cet->cops, frame->n_elts);
  *nb_elts_processed = frame->n_elts;
  *enqueue_thread_idx = frame->enqueue_thread_index;

  return frame;
}
/**
 * assign a cryptodev resource (device queue pair) to a worker.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t *cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assigning a resource is only allowed when no op is in flight */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
	  vec_len (cmt->cryptodev_inst))
	return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning an already-used resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask,
			   cryptodev_inst_index) == 1)
	return -EBUSY;

      /* ... lookup of the worker's current assignment elided ... */

      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set (cmt->active_cdev_inst_mask,
		       cryptodev_inst_index, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, cryptodev_inst_index);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}
static u8 *
format_cryptodev_inst (u8 *s, va_list *args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
	s = format (s, "%u (%v)\n", thread_index,
		    vlib_worker_threads[thread_index].name);
	break;
      }
  }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			      vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
		   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			     vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  /* ... bounds checks on thread_index and inst_index elided ... */

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
				   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    error = clib_error_return (0, "cryptodev_assign_resource returned %d",
			       ret);

  return error;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
	"resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
static int
check_cryptodev_alg_support (u32 dev_id)
{
  const struct rte_cryptodev_symmetric_capability *cap;
  struct rte_cryptodev_sym_capability_idx cap_idx;

#define _(a, b, c, d, e, f)                                               \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b;                                \
  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c;                               \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx);              \
  if (!cap)                                                               \
    return -RTE_CRYPTO_##b##_##c;                                         \
  else                                                                    \
    {                                                                     \
      if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
	return -RTE_CRYPTO_##b##_##c;                                     \
      if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f)       \
	return -RTE_CRYPTO_##b##_##c;                                     \
      if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d)         \
	return -RTE_CRYPTO_##b##_##c;                                     \
    }

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d)                                        \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                \
  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;               \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap)                                                  \
    return -RTE_CRYPTO_CIPHER_##b;                           \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                  \
  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC;            \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap)                                                  \
    return -RTE_CRYPTO_AUTH_##c;

  foreach_cryptodev_link_async_alg
#undef _
  return 0;
}

static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  int i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      if (rte_cryptodev_socket_id (i) != numa)
	{
	  clib_warning ("DPDK crypto resource %s is in a different numa node "
			"than %u, ignored", info.device->name, numa);
	  continue;
	}
      if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
	continue;
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}
static int
cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
{
  struct rte_cryptodev_info info;
  struct rte_cryptodev *cdev;
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
						       vm->numa_node);
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  /* do not configure a device that lacks symmetric crypto support */
  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return 0;

  ret = check_cryptodev_alg_support (cryptodev_id);
  if (ret != 0)
    return ret;

  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
  /* if the device is already started, reuse it; otherwise configure both
     the device and its queue pairs */
  if (!cdev->data->dev_started)
    {
      struct rte_cryptodev_config cfg;

      cfg.socket_id = vm->numa_node;
      cfg.nb_queue_pairs = info.max_nb_queue_pairs;

      rte_cryptodev_configure (cryptodev_id, &cfg);

      for (i = 0; i < info.max_nb_queue_pairs; i++)
	{
	  struct rte_cryptodev_qp_conf qp_cfg;

	  qp_cfg.mp_session = numa_data->sess_pool;
	  qp_cfg.mp_session_private = numa_data->sess_priv_pool;
	  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

	  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
						vm->numa_node);
	  if (ret)
	    break;
	}
      if (i != info.max_nb_queue_pairs)
	return -1;

      /* start the device (by its id; `i' is a queue-pair count here) */
      rte_cryptodev_start (cryptodev_id);
    }

  for (i = 0; i < cdev->data->nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
		"%s_q%u", info.device->name, i);
    }

  return 0;
}
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;
  int ret;

  /* bail out if there are fewer queue pairs than workers */
  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      ret = cryptodev_configure (vm, i);
      if (ret)
	return ret;
    }

  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  return 0;
}
static int
cryptodev_get_session_sz (vlib_main_t *vm, uint32_t n_workers)
{
  u32 sess_data_sz = 0, i;

  if (rte_cryptodev_count () == 0)
    return -1;

  /* the session private-data size must fit the largest requirement of any
     probed device */
  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);

      sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
    }

  return sess_data_sz;
}
static void
dpdk_disable_cryptodev_engine (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);

  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);
  if (numa_data->cop_pool)
    rte_mempool_free (numa_data->cop_pool);
}
static void
crypto_op_init (struct rte_mempool *mempool,
		void *_arg __attribute__ ((unused)),
		void *_obj, unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}
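/* crypto_op_init() matches DPDK's rte_mempool_obj_cb_t signature
 * (pool, opaque argument, object, object index), so dpdk_cryptodev_init()
 * below can pass it as the obj_init callback of rte_mempool_create(): every
 * rte_crypto_op is then initialized once at pool-creation time instead of
 * on every enqueue burst. */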
clib_error_t *
dpdk_cryptodev_init (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *ptd;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 numa = vm->numa_node;
  i32 sess_sz;
  u64 n_cop_elts;
  u32 eidx;
  u32 i;
  u8 *name = 0;
  clib_error_t *error;
  struct rte_crypto_op_pool_private *priv;

  sess_sz = cryptodev_get_session_sz (vm, n_workers);
  if (sess_sz < 0)
    return clib_error_return (0, "Not enough cryptodevs");

  /* round the op count up to a power of two, as rte_mempool prefers */
  n_cop_elts = max_pow2 (n_workers * CRYPTODEV_NB_CRYPTO_OPS);

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

  /* create the session pool for this numa node */
  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
  mp = rte_cryptodev_sym_session_pool_create ((char *) name,
					      CRYPTODEV_NB_SESSION, 0, 0, 0,
					      numa);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto err_handling;
    }
  vec_free (name);
  numa_data->sess_pool = mp;

  /* create the session private-data pool (same error handling as above,
     elided here) */
  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
			   0, NULL, NULL, NULL, NULL, numa, 0);
  vec_free (name);
  numa_data->sess_priv_pool = mp;

  /* create the crypto-op pool; crypto_op_init() pre-initializes each op */
  name = format (0, "cryptodev_op_pool_%u%c", numa, 0);
  mp = rte_mempool_create ((char *) name, n_cop_elts,
			   sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2,
			   sizeof (struct rte_crypto_op_pool_private), NULL,
			   NULL, crypto_op_init, NULL, numa, 0);
  vec_free (name);

  priv = rte_mempool_get_priv (mp);
  priv->priv_size = sizeof (struct rte_crypto_op_pool_private);
  priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  numa_data->cop_pool = mp;

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Failed to configure cryptodev");
      goto err_handling;
    }

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      ptd = cmt->per_thread_data + i;
      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
      name = format (0, "frames_ring_%u%c", i, 0);
      ptd->ring = rte_ring_create ((char *) name, CRYPTODEV_NB_CRYPTO_OPS,
				   vm->numa_node,
				   RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (!ptd->ring)
	{
	  error = clib_error_return (0, "Not enough memory for mp %s", name);
	  vec_free (name);
	  goto err_handling;
	}
      vec_validate (ptd->cops, VNET_CRYPTO_FRAME_SIZE - 1);
      vec_free (name);
    }

  /* register the engine and its async handlers */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
				      "DPDK Cryptodev Engine");

#define _(a, b, c, d, e, f)                                         \
  vnet_crypto_register_async_handler                                \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,           \
     cryptodev_enqueue_gcm_aad_##f##_enc, cryptodev_frame_dequeue);  \
  vnet_crypto_register_async_handler                                \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,           \
     cryptodev_enqueue_gcm_aad_##f##_dec, cryptodev_frame_dequeue);

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d)                                               \
  vnet_crypto_register_async_handler                                \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC,              \
     cryptodev_enqueue_linked_alg_enc, cryptodev_frame_dequeue);     \
  vnet_crypto_register_async_handler                                \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC,              \
     cryptodev_enqueue_linked_alg_dec, cryptodev_frame_dequeue);

  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  return 0;

err_handling:
  dpdk_disable_cryptodev_engine (vm);

  return error;
}