FD.io VPP v21.10.1-2-g0a485f517 | Vector Packet Processing
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_ring_peek_zc.h>
#include <rte_config.h>
/* Restore VPP's always_inline (undefined before pulling in the DPDK
 * headers); inlining is forced only in release builds. */
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))
#define foreach_vnet_crypto_status_conversion                                 \
  _ (SUCCESS, COMPLETED)                                                      \
  _ (NOT_PROCESSED, WORK_IN_PROGRESS)                                         \
  _ (AUTH_FAILED, FAIL_BAD_HMAC)                                              \
  _ (INVALID_SESSION, FAIL_ENGINE_ERR)                                        \
  _ (INVALID_ARGS, FAIL_ENGINE_ERR)                                           \
  _ (ERROR, FAIL_ENGINE_ERR)
static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};
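/* Illustration (not part of the file): the X-macro expands to a direct
 * RTE-to-VNET status lookup table, indexable by enum rte_crypto_op_status:
 *
 *   static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
 *     VNET_CRYPTO_OP_STATUS_COMPLETED,        // RTE_..._SUCCESS
 *     VNET_CRYPTO_OP_STATUS_WORK_IN_PROGRESS, // RTE_..._NOT_PROCESSED
 *     VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC,    // RTE_..._AUTH_FAILED
 *     VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,  // RTE_..._INVALID_SESSION
 *     VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,  // RTE_..._INVALID_ARGS
 *     VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,  // RTE_..._ERROR
 *   };
 *
 * This assumes the _() entries mirror the order of enum rte_crypto_op_status,
 * which is what makes an op's status usable as an index in the dequeue path
 * below. */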
static_always_inline rte_iova_t
cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode,
                    void *data)
{
  u64 index;
  if (mode == RTE_IOVA_VA)
    return (rte_iova_t) pointer_to_uword (data);

  index = clib_pmalloc_get_page_index (pm, data);
  return pointer_to_uword (data) - pm->lookup_table[index];
}
static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
                               vlib_buffer_t *b)
{
  struct rte_mbuf *first_mb = mb, *last_mb = mb;
  /* ... data_len derived from b->current_length and the vlib/mbuf data
     offsets ... */
  first_mb->nb_segs = 1;
  first_mb->pkt_len = first_mb->data_len = data_len;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
        rte_pktmbuf_reset (mb);
      /* ... re-link mb after last_mb, refresh its length and data_off
         fields, and fix up the pool for cloned buffers via
         dpdk_no_cache_mempool_by_buffer_pool_index ... */
    }
}
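/* Why re-validate (paraphrasing the intent): when a packet did not enter
 * through dpdk-input, or when a chained vlib buffer was modified after rx,
 * the mbuf metadata (nb_segs, data_len, next) can be stale. For a single
 * buffer that is harmless because the data length is written into the
 * cryptodev operation itself, but for a chain the mbufs must be re-linked
 * from the vlib_buffer_t chain before the cryptodev PMD walks them. */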
static_always_inline void
crypto_op_init (struct rte_mempool *mempool,
                void *_arg __attribute__ ((unused)), void *_obj,
                unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}
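/* Design note: crypto_op_init runs once per object when the mempool is
 * populated (it is passed as the obj_init callback to rte_mempool_create
 * in cryptodev_register_cop_hdl below), so these constant fields never
 * have to be rewritten in the per-packet enqueue path; only the fields
 * that vary per packet are filled there. */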
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
                                     vnet_crypto_async_frame_t *frame,
                                     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  struct rte_cryptodev_sym_session *sess = 0;
  u32 n_enqueue, n_elts;
  u32 last_key_index = ~0;

  /* ... */
  n_elts = frame->n_elts;

  /* ... frame does not fit in the in-flight budget: fail every element */
  cryptodev_mark_frame_err_status (frame,
                                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  /* ... */

  if (PREDICT_FALSE (
        rte_mempool_get_bulk (cet->cop_pool, (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      /* ... */
    }

  /* ... */
  bi = frame->buffer_indices;

  while (n_elts) /* one rte_crypto_sym_op per frame element */
    {
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      /* ... */
      u32 offset_diff = crypto_offset - integ_offset;
      /* ... on session-creation failure: */
      cryptodev_mark_frame_err_status (
        frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      /* ... rebase offsets when the integrity region starts before the
         mbuf data pointer: */
      crypto_offset = offset_diff;
      /* ... */
      sop->cipher.data.offset = crypto_offset;
      /* ... */
      sop->auth.data.offset = integ_offset;
      /* ... */
      sop->auth.digest.data = fe->digest;
      sop->auth.digest.phys_addr =
        cryptodev_get_iova (pm, cmt->iova_mode, fe->digest);
      /* ... chained buffers go through cryptodev_validate_mbuf_chain ();
         otherwise force a single segment: */
      sop->m_src->nb_segs = 1;
      /* ... */
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                           (struct rte_crypto_op **) cet->cops,
                                           frame->n_elts);
  /* ... */
}
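/* Worked example for the offset rebasing above (illustrative values, not
 * from the file): suppose fe->integ_start_offset = -8 (integrity coverage
 * begins 8 bytes before the buffer's current data, e.g. a header prepended
 * in pre_data) and fe->crypto_start_offset = 24. Then
 * offset_diff = 24 - (-8) = 32. The enqueue path moves sop->m_src->data_off
 * back by 8 bytes and rebases: integ_offset becomes 0 and crypto_offset
 * becomes offset_diff = 32. Both regions keep their absolute positions
 * while the offsets stay non-negative, as DPDK's cipher/auth data offsets
 * require. */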
static_always_inline int
cryptodev_frame_aead_enqueue (vlib_main_t *vm,
                              vnet_crypto_async_frame_t *frame,
                              cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  struct rte_cryptodev_sym_session *sess = 0;
  u32 n_enqueue = 0, n_elts;
  u32 last_key_index = ~0;

  /* ... */
  n_elts = frame->n_elts;

  /* ... over the in-flight budget: */
  cryptodev_mark_frame_err_status (frame,
                                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  /* ... */

  if (PREDICT_FALSE (
        rte_mempool_get_bulk (cet->cop_pool, (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      /* ... */
    }

  /* ... */
  bi = frame->buffer_indices;

  while (n_elts)
    {
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      /* ... on session-creation failure: */
      cryptodev_mark_frame_err_status (
        frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      /* ... session exists with a different AAD length and re-creation
         failed: */
      cryptodev_mark_frame_err_status (
        frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      /* ... */
      sop->aead.aad.data = cop[0]->aad;
      /* ... */
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      sop->aead.digest.phys_addr =
        cryptodev_get_iova (pm, cmt->iova_mode, fe->tag);
      /* ... chained buffers validated, else force a single segment: */
      sop->m_src->nb_segs = 1;
      /* ... */
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                           (struct rte_crypto_op **) cet->cops,
                                           frame->n_elts);
  /* ... */
}
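/* Sketch of the AAD handling (assumed from CRYPTODEV_AAD_OFFSET above, not
 * shown in this extract): the AAD bytes are copied into the op-embedded
 * aad[] scratch area so its IOVA can be derived from the op's own
 * phys_addr with a constant offset, e.g.
 *
 *   clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
 *   sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
 *
 * avoiding a per-packet address translation for the AAD. */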
static_always_inline u16
cryptodev_ring_deq (struct rte_ring *r, cryptodev_op_t **cops)
{
  u16 n, n_elts = 0;

  /* zero-copy peek: start a bulk dequeue of the head op, then finish with
     count 0 so the ring is left untouched */
  n = rte_ring_dequeue_bulk_start (r, (void **) cops, 1, 0);
  rte_ring_dequeue_finish (r, 0);
  /* ... n becomes the peeked frame's element count ... */
  if (rte_ring_count (r) < n)
    return 0;

  n_elts = rte_ring_sc_dequeue_bulk (r, (void **) cops, n, 0);
  /* ... */
  return n_elts;
}
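/* Why the peek (inference from the surrounding code, not a comment in the
 * file): ops belonging to one frame are enqueued to the ring contiguously,
 * and the first op carries the frame pointer and its element count. Peeking
 * the head tells the caller how many ops make up the frame; the real
 * dequeue proceeds only once all of them are on the ring, so frames are
 * always handed back whole. */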
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
                         u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  /* ... */
  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* per-lane status accumulators */

  /* ... while ops are in flight, drain the device and park completions on
     the per-thread ring: */
  n_elts = rte_cryptodev_dequeue_burst (
    cet->cryptodev_id, cet->cryptodev_q, (struct rte_crypto_op **) cet->cops,
    VNET_CRYPTO_FRAME_SIZE);
  /* ... */
  n_completed_ops += n_elts;

  rte_ring_sp_enqueue_burst (cet->ring, (void **) cet->cops, n_elts, NULL);

  /* ... dequeue one whole frame via cryptodev_ring_deq (), then fold each
     op's status through cryptodev_status_conversion[] into ss0..ss3, four
     elements at a time ... */

  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
                   VNET_CRYPTO_FRAME_STATE_SUCCESS :
                   VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  /* ... */
  *nb_elts_processed = frame->n_elts;
  *enqueue_thread_idx = frame->enqueue_thread_index;
  return frame;
}
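/* Status reduction trick: each lane ORs converted statuses together, and OR
 * of identical values is the value itself. If every element completed, the
 * combined result still equals VNET_CRYPTO_OP_STATUS_COMPLETED; any failing
 * status contributes extra bits, the comparison fails, and the frame is
 * flagged ELT_ERROR without a per-element branch. (This relies on the
 * numeric encoding of vnet_crypto_op_status_t.) */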
clib_error_t *
cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)
{
  /* ... */
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;

  /* ... per-thread op pool, initialized object-by-object by crypto_op_init: */
  cet->cop_pool = rte_mempool_create (
    /* ... */ sizeof (struct rte_crypto_op_pool_private), NULL, NULL,
    crypto_op_init, /* ... */);
  if (!cet->cop_pool)
    {
      error = clib_error_return (
        0, "Failed to create cryptodev op pool %s", name);
      /* ... */
    }

  /* ... per-thread completion ring: */
  cet->ring = rte_ring_create (/* ... */,
                               RING_F_SP_ENQ | RING_F_SC_DEQ);
  if (!cet->ring)
    {
      error = clib_error_return (
        0, "Failed to create cryptodev frame ring %s", name);
      /* ... */
    }
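/* Context note (elided by this extract): the pool and ring above are
 * created once per worker thread inside a vec_foreach over
 * cmt->per_thread_data, on that thread's NUMA node, and
 * vec_validate (cet->cops, VNET_CRYPTO_FRAME_SIZE - 1) sizes the per-thread
 * scratch array of op pointers shared by the enqueue and dequeue paths. */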
#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
        cryptodev_enqueue_aead_aad_##f##_enc, cryptodev_frame_dequeue);       \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
        cryptodev_enqueue_aead_aad_##f##_dec, cryptodev_frame_dequeue);       \
    }
  foreach_vnet_aead_crypto_conversion
#undef _
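/* Example expansion (hypothetical entry values): for an AES-128-GCM entry
 * with tag length e = 16 and AAD length f = 8, and assuming g (the key size
 * passed to the capability check) is 16 bytes, the macro expands to:
 *
 *   cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
 *   cap_aead_idx.algo.aead = RTE_CRYPTO_AEAD_AES_GCM;
 *   if (cryptodev_check_cap_support (&cap_aead_idx, 16, 16, 8))
 *     {
 *       vnet_crypto_register_async_handler (
 *         vm, eidx, VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD8_ENC,
 *         cryptodev_enqueue_aead_aad_8_enc, cryptodev_frame_dequeue);
 *       vnet_crypto_register_async_handler (
 *         vm, eidx, VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD8_DEC,
 *         cryptodev_enqueue_aead_aad_8_dec, cryptodev_frame_dequeue);
 *     }
 */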
#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
        cryptodev_enqueue_linked_alg_enc, cryptodev_frame_dequeue);           \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
        cryptodev_enqueue_linked_alg_dec, cryptodev_frame_dequeue);           \
    }
  foreach_cryptodev_link_async_alg
#undef _
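/* Example expansion (hypothetical entry): a foreach_cryptodev_link_async_alg
 * entry of the shape crypto (AES_128_CBC, AES_CBC, 16) + hash (SHA1, 12)
 * would check a 16-byte AES-CBC key capability and a 12-byte SHA1-HMAC
 * digest capability, then register
 * VNET_CRYPTO_OP_AES_128_CBC_SHA1_TAG12_ENC / _DEC with
 * cryptodev_enqueue_linked_alg_enc / _dec and cryptodev_frame_dequeue. */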
  /* ... error path: release per-thread resources ... */
  rte_ring_free (cet->ring);
Cross-reference (cleaned from the generated tooltips):

Functions defined in this file:
  static_always_inline rte_iova_t cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode, void *data)
  static_always_inline void cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb, vlib_buffer_t *b)
  static_always_inline void crypto_op_init (struct rte_mempool *mempool, void *_arg, void *_obj, unsigned i)
  static_always_inline int cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame, cryptodev_op_type_t op_type)
  static_always_inline int cryptodev_frame_aead_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame, cryptodev_op_type_t op_type, u8 aad_len)
  static_always_inline int cryptodev_enqueue_aead_aad_8_enc / _8_dec / _12_enc / _12_dec (vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
  static_always_inline int cryptodev_enqueue_linked_alg_enc / _dec (vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
  static_always_inline u16 cryptodev_ring_deq (struct rte_ring *r, cryptodev_op_t **cops)
  static_always_inline vnet_crypto_async_frame_t *cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed, u32 *enqueue_thread_idx)
  static_always_inline void cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t *f, vnet_crypto_op_status_t s)
  clib_error_t *cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)

Cryptodev helpers and state declared elsewhere:
  void cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop, vnet_crypto_key_index_t idx, u32 aad_len)
  int cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx, u32 aad_len)
  cryptodev_main_t cryptodev_main: cryptodev_engine_thread_t *per_thread_data; struct rte_mempool *cop_pool; enum rte_iova_mode iova_mode
  static const vnet_crypto_op_status_t cryptodev_status_conversion[]
  cryptodev_op_t members: struct rte_crypto_sym_op sop; u8 aad[CRYPTODEV_MAX_AAD_SIZE]; vnet_crypto_async_frame_t *frame
  vnet_crypto_async_frame_elt_t: u8 flags (shares VNET_CRYPTO_OP_FLAG_* values); vnet_crypto_op_status_t status
  cryptodev_op_type_t: CRYPTODEV_OP_TYPE_ENCRYPT, CRYPTODEV_OP_TYPE_DECRYPT
  Macros: CRYPTODEV_NB_CRYPTO_OPS, CRYPTODEV_MAX_INFLIGHT, CRYPTODEV_AAD_OFFSET, foreach_vnet_crypto_status_conversion, foreach_vnet_aead_crypto_conversion, foreach_cryptodev_link_async_alg (entries: crypto (alg, cryptodev_alg, key_size), hash (alg, digest_size))

VPP infrastructure:
  static vlib_buffer_t *vlib_get_buffer (vlib_main_t *vm, u32 buffer_index): translate buffer index into buffer pointer
  static vlib_main_t *vlib_get_main_by_index (u32 thread_index)
  static u32 clib_pmalloc_get_page_index (clib_pmalloc_main_t *pm, void *va)
  static uword pointer_to_uword (const void *p)
  static_always_inline void *clib_memcpy_fast (void *restrict dst, const void *restrict src, size_t n)
  static_always_inline void clib_prefetch_load (void *p)
  vlib_buffer_t fields: i16 current_data (signed offset in data[]/pre_data[] currently processed); u16 current_length (bytes between current data and end of buffer); u32 next_buffer (next buffer in this chain); u8 buffer_pool_index (owning buffer pool); volatile u8 ref_count (buffer reference count); u32 flags (VLIB_BUFFER_* flags)
  vlib_physmem_main_t physmem_main; clib_pmalloc_main_t *pmalloc_main
  struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index
  Macros: rte_mbuf_from_vlib_buffer (x), VLIB_BUFFER_PRE_DATA_SIZE, VNET_CRYPTO_FRAME_SIZE, VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS, vec_elt_at_index (v, i), vec_validate (V, I), vec_free (V), vec_foreach (var, vec), clib_error_return (e, args...), CLIB_PREFETCH (addr, size, type), static_always_inline
  Frame states: VNET_CRYPTO_FRAME_STATE_SUCCESS, VNET_CRYPTO_FRAME_STATE_ELT_ERROR