#ifndef __DPDK_IPSEC_H__
#define __DPDK_IPSEC_H__

#include <vnet/vnet.h>
#include <vnet/ipsec/ipsec.h>

#undef always_inline
#include <rte_config.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

#define DPDK_CRYPTO_N_QUEUE_DESC  2048
#define DPDK_CRYPTO_NB_SESS_OBJS  20000

#define foreach_dpdk_crypto_input_next		\
  _(DROP, "error-drop")				\
  _(IP4_LOOKUP, "ip4-lookup")			\
  _(IP6_LOOKUP, "ip6-lookup")			\
  _(INTERFACE_OUTPUT, "interface-output")	\
  _(MIDCHAIN, "adj-midchain-tx")		\
  _(DECRYPT4_POST, "dpdk-esp4-decrypt-post")	\
  _(DECRYPT6_POST, "dpdk-esp6-decrypt-post")

typedef enum
{
#define _(f,s) DPDK_CRYPTO_INPUT_NEXT_##f,
  foreach_dpdk_crypto_input_next
#undef _
    DPDK_CRYPTO_INPUT_N_NEXT,
} dpdk_crypto_input_next_t;

#define MAX_QP_PER_LCORE 16
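/*
 * For reference, the X-macro above expands dpdk_crypto_input_next_t to
 * one next-node index per _(f,s) pair, e.g.:
 *
 *   DPDK_CRYPTO_INPUT_NEXT_DROP,           // graph arc to "error-drop"
 *   DPDK_CRYPTO_INPUT_NEXT_IP4_LOOKUP,     // graph arc to "ip4-lookup"
 *   ...
 *   DPDK_CRYPTO_INPUT_NEXT_DECRYPT6_POST,  // graph arc to "dpdk-esp6-decrypt-post"
 *   DPDK_CRYPTO_INPUT_N_NEXT
 */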
typedef struct
{
  u32 salt;
  u32 iv[2];
  u32 cnt;
} dpdk_gcm_cnt_blk;

typedef struct
{
  u32 next;
  u32 bi;
  u8 encrypt;
    CLIB_ALIGN_MARK (mark0, 16);
  dpdk_gcm_cnt_blk cb;
  u8 aad[16];
  u8 icv[32];			/* sized for a 256-bit digest */
} dpdk_op_priv_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u16 *resource_idx;
  struct rte_crypto_op **ops;
  u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG];
  u16 auth_resource_idx[IPSEC_INTEG_N_ALG];
} crypto_worker_main_t;

typedef struct
{
  enum rte_crypto_sym_xform_type type;
  u32 alg;
  u8 key_len;
  u8 iv_len;
  u8 trunc_size;
  u8 disabled;
  u8 resources;
} crypto_alg_t;

typedef struct
{
  u8 drv_id;
  u8 dev_id;
  u8 numa;
  u16 qp_id;
  u16 inflights[2];
  u16 n_ops;
  struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
  u32 bi[VLIB_FRAME_SIZE];
} crypto_resource_t;

typedef struct
{
  u64 ts;
  struct rte_cryptodev_sym_session *session;
} crypto_session_disposal_t;

typedef struct
{
  struct rte_cryptodev_sym_session *session;
  u64 dev_mask;
} crypto_session_by_drv_t;

typedef struct
{
  struct rte_mempool *crypto_op;
  struct rte_mempool *session_h;
  struct rte_mempool **session_drv;
  crypto_session_disposal_t *session_disposal;
  uword *session_by_sa_index;
  u64 crypto_op_get_failed;
  crypto_session_by_drv_t *session_by_drv_id_and_sa_index;
} crypto_data_t;

typedef struct
{
  crypto_worker_main_t *workers_main;
  crypto_resource_t *resource;
  crypto_alg_t *cipher_algs;
  crypto_alg_t *auth_algs;
  crypto_data_t *data;
} dpdk_crypto_main_t;

extern dpdk_crypto_main_t dpdk_crypto_main;

/* ESP trailer padding bytes (RFC 4303: 1, 2, 3, ...). */
static const u8 pad_data[] =
  { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0 };

void crypto_auto_placement (void);

clib_error_t *create_sym_session (struct rte_cryptodev_sym_session **session,
				  u32 sa_idx, crypto_resource_t * res,
				  crypto_worker_main_t * cwm, u8 is_outbound);
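/*
 * Illustrative sketch (the names tail, pad_bytes and next_hdr are
 * assumptions, not from this header): an ESP encrypt path can fill the
 * trailer padding with a single copy from pad_data.
 *
 *   clib_memcpy_fast (tail, pad_data, pad_bytes);  // 1, 2, 3, ...
 *   tail[pad_bytes] = pad_bytes;                   // pad length
 *   tail[pad_bytes + 1] = next_hdr;                // next header
 */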
static_always_inline u32
crypto_op_len (void)
{
  const u32 align = 16;
  u32 op_size =
    sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);

  return ((op_size + align - 1) & ~(align - 1)) + sizeof (dpdk_op_priv_t);
}
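/*
 * Layout of one op mempool element as sized by crypto_op_len():
 *
 *   +--------------------------+ offset 0
 *   | struct rte_crypto_op     |
 *   +--------------------------+
 *   | struct rte_crypto_sym_op |
 *   +--------------------------+ rounded up to the next 16-byte boundary
 *   | dpdk_op_priv_t           | per-op VPP metadata (IV block, AAD, ICV)
 *   +--------------------------+
 *
 * crypto_op_get_priv_offset() below returns the offset of that last region.
 */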
static_always_inline u32
crypto_op_get_priv_offset (void)
{
  const u32 align = 16;
  u32 offset;

  offset = sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);
  offset = (offset + align - 1) & ~(align - 1);

  return offset;
}

static_always_inline dpdk_op_priv_t *
crypto_op_get_priv (struct rte_crypto_op *op)
{
  return (dpdk_op_priv_t *) (((u8 *) op) + crypto_op_get_priv_offset ());
}
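/*
 * Illustrative sketch (bi0 and next0 are assumed names): per-packet
 * state travels with the op through the cryptodev and is recovered by
 * the dequeue node.
 *
 *   dpdk_op_priv_t *priv = crypto_op_get_priv (op);
 *   priv->bi = bi0;      // vlib buffer index to resume with
 *   priv->next = next0;  // next node once the op completes
 */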
static_always_inline void
add_session_by_drv_and_sa_idx (struct rte_cryptodev_sym_session *session,
			       crypto_data_t * data, u32 drv_id, u32 sa_idx)
{
  crypto_session_by_drv_t *sess_by_sa;
  vec_validate_aligned (data->session_by_drv_id_and_sa_index, sa_idx,
			CLIB_CACHE_LINE_BYTES);
  sess_by_sa =
    vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
  sess_by_sa->dev_mask |= 1L << drv_id;
  sess_by_sa->session = session;
}

static_always_inline struct rte_cryptodev_sym_session *
get_session_by_drv_and_sa_idx (crypto_data_t * data, u32 drv_id, u32 sa_idx)
{
  crypto_session_by_drv_t *sess_by_sa;
  if (vec_len (data->session_by_drv_id_and_sa_index) <= sa_idx)
    return NULL;
  sess_by_sa =
    vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
  return (sess_by_sa->dev_mask & (1L << drv_id)) ? sess_by_sa->session : NULL;
}
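/*
 * dev_mask is a per-SA bitmap of driver ids holding a session. Example:
 * after add_session_by_drv_and_sa_idx (s, data, 2, sa_idx), bit 2 is set
 * and get_session_by_drv_and_sa_idx (data, 2, sa_idx) returns s, while a
 * lookup for drv_id 3 still returns NULL.
 */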
static_always_inline clib_error_t *
crypto_get_session (struct rte_cryptodev_sym_session **session,
		    u32 sa_idx,
		    crypto_resource_t * res,
		    crypto_worker_main_t * cwm, u8 is_outbound)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_data_t *data;
  struct rte_cryptodev_sym_session *sess;

  data = vec_elt_at_index (dcm->data, res->numa);
  sess = get_session_by_drv_and_sa_idx (data, res->drv_id, sa_idx);

  if (PREDICT_FALSE (!sess))
    return create_sym_session (session, sa_idx, res, cwm, is_outbound);

  session[0] = sess;

  return NULL;
}
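/*
 * Illustrative call-site sketch (sa_index0 and the drop handling are
 * assumptions): an ESP node resolves the session once per packet and
 * drops on failure.
 *
 *   struct rte_cryptodev_sym_session *session;
 *   clib_error_t *err =
 *     crypto_get_session (&session, sa_index0, res, cwm, 1); // outbound
 *   if (PREDICT_FALSE (err != NULL))
 *     ; // count the error and send the packet to error-drop
 */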
static_always_inline u16
get_resource (crypto_worker_main_t * cwm, ipsec_sa_t * sa)
{
  u16 cipher_res = cwm->cipher_resource_idx[sa->crypto_alg];
  u16 auth_res = cwm->auth_resource_idx[sa->integ_alg];
  u8 is_aead;

  /* A missing mapping means no queue pair supports the algorithm. */
  if (cipher_res == (u16) ~0)
    return (u16) ~0;
  if (auth_res == (u16) ~0)
    return (u16) ~0;

  is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) ||
	     (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) ||
	     (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256));

  if (cipher_res == auth_res)
    return cipher_res;

  if (is_aead)
    return cipher_res;

  return (u16) ~0;
}
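/*
 * Example: for AES-GCM (AEAD) the auth work rides on the cipher resource,
 * so cipher_res is returned even if auth_res differs; for a cipher+auth
 * pair such as AES-CBC with SHA1, both algorithms must map to the same
 * queue pair or the SA gets no resource ((u16) ~0).
 */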
static_always_inline i32
crypto_alloc_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_data_t *data = vec_elt_at_index (dcm->data, numa);
  i32 ret;

  ret = rte_mempool_get_bulk (data->crypto_op, (void **) ops, n);

  /* rte_mempool_get_bulk () is all-or-nothing; account failed attempts. */
  data->crypto_op_get_failed += ! !ret;

  return ret;
}
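/*
 * Illustrative sketch (numa and n_left are assumed names): allocate one
 * op per packet in the frame; a non-zero return means the pool is
 * exhausted and the whole burst must be dropped.
 *
 *   struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
 *   if (PREDICT_FALSE (crypto_alloc_ops (numa, ops, n_left)))
 *     ; // increment drop counter, free buffers, and return
 */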
static_always_inline void
crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_data_t *data = vec_elt_at_index (dcm->data, numa);

  if (!n)
    return;

  rte_mempool_put_bulk (data->crypto_op, (void **) ops, n);
}
static_always_inline void
crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm,
		    u32 node_index, u32 error, u8 numa, u8 encrypt)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_resource_t *res;
  u16 *res_idx;

  vec_foreach (res_idx, cwm->resource_idx)
  {
    u16 enq, n_ops;
    res = vec_elt_at_index (dcm->resource, res_idx[0]);

    if (!res->n_ops)
      continue;

    /* Leave half of the queue descriptors per direction in flight. */
    n_ops = (DPDK_CRYPTO_N_QUEUE_DESC / 2) - res->inflights[encrypt];
    n_ops = res->n_ops < n_ops ? res->n_ops : n_ops;
    enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id,
				       res->ops, n_ops);
    res->inflights[encrypt] += enq;

    /* Ops the device did not accept are freed and their buffers dropped. */
    if (PREDICT_FALSE (enq < res->n_ops))
      {
	crypto_free_ops (numa, &res->ops[enq], res->n_ops - enq);
	vlib_buffer_free (vm, &res->bi[enq], res->n_ops - enq);
	vlib_node_increment_counter (vm, node_index, error,
				     res->n_ops - enq);
      }
    res->n_ops = 0;
  }
}
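/*
 * Illustrative sketch (node and error names are assumptions): a worker
 * batches ops onto its resources while processing a frame, then flushes
 * them once at the end of the node dispatch.
 *
 *   crypto_enqueue_ops (vm, cwm, node->node_index,
 *                       ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1); // encrypt
 */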
static_always_inline void
crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi)
{
  icb->salt = salt;
  icb->iv[0] = seq;
  icb->iv[1] = seq_hi;
}

static_always_inline void
crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0,
		 struct rte_crypto_op *op, void *session,
		 u32 cipher_off, u32 cipher_len,
		 u32 auth_off, u32 auth_len,
		 u8 * aad, u8 * digest, u64 digest_paddr)
{
  struct rte_crypto_sym_op *sym_op;

  sym_op = (struct rte_crypto_sym_op *) (op + 1);

  sym_op->m_src = mb0;
  sym_op->session = session;

  if (is_aead)
    {
      sym_op->aead.data.offset = cipher_off;
      sym_op->aead.data.length = cipher_len;

      sym_op->aead.aad.data = aad;
      sym_op->aead.aad.phys_addr =
	op->phys_addr + (uintptr_t) aad - (uintptr_t) op;

      sym_op->aead.digest.data = digest;
      sym_op->aead.digest.phys_addr = digest_paddr;
    }
  else
    {
      sym_op->cipher.data.offset = cipher_off;
      sym_op->cipher.data.length = cipher_len;

      sym_op->auth.data.offset = auth_off;
      sym_op->auth.data.length = auth_len;

      sym_op->auth.digest.data = digest;
      sym_op->auth.digest.phys_addr = digest_paddr;
    }
}
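/*
 * Illustrative AEAD sketch (the salt/seq field names on the SA are
 * assumptions): AAD and the GCM counter block live in the op private
 * area, which is why aad.phys_addr above can be derived from
 * op->phys_addr.
 *
 *   dpdk_op_priv_t *priv = crypto_op_get_priv (op);
 *   crypto_set_icb (&priv->cb, sa->salt, seq, seq_hi);
 *   crypto_op_setup (1, mb0, op, session, cipher_off, cipher_len,
 *                    0, 0, priv->aad, digest, digest_paddr); // is_aead
 */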
#endif /* __DPDK_IPSEC_H__ */