/*
 * ipsecmb.c - Intel IPsec Multi-Buffer library crypto engine
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

#define HMAC_MAX_BLOCK_SIZE SHA_512_BLOCK_SIZE
#define EXPANDED_KEY_N_BYTES (16 * 15)

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  __m128i cbc_iv;
  MB_MGR *mgr;
} ipsecmb_per_thread_data_t;

typedef struct
{
  u16 data_size;
  u8 block_size;
  aes_gcm_pre_t aes_gcm_pre;
  keyexp_t keyexp;
  hash_one_block_t hash_one_block;
  hash_fn_t hash_fn;
} ipsecmb_alg_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
  ipsecmb_alg_data_t alg_data[VNET_CRYPTO_N_ALGS];
  void **key_data;
} ipsecmb_main_t;

typedef struct
{
  u8 enc_key_exp[EXPANDED_KEY_N_BYTES];
  u8 dec_key_exp[EXPANDED_KEY_N_BYTES];
} ipsecmb_aes_cbc_key_data_t;

static ipsecmb_main_t ipsecmb_main = { };

/* (alg, JOB_HASH_ALG, fn stem, block size, hash size, digest size) */
#define foreach_ipsecmb_hmac_op                                \
  _(SHA1,   SHA1,    sha1,   64,  20, 20)                      \
  _(SHA224, SHA_224, sha224, 64,  32, 28)                      \
  _(SHA256, SHA_256, sha256, 64,  32, 32)                      \
  _(SHA384, SHA_384, sha384, 128, 64, 48)                      \
  _(SHA512, SHA_512, sha512, 128, 64, 64)

/* (alg, key size in bits) */
#define foreach_ipsecmb_cbc_cipher_op                          \
  _(AES_128_CBC, 128)                                          \
  _(AES_192_CBC, 192)                                          \
  _(AES_256_CBC, 256)

/* (alg, key size in bits) */
#define foreach_ipsecmb_gcm_cipher_op                          \
  _(AES_128_GCM, 128)                                          \
  _(AES_192_GCM, 192)                                          \
  _(AES_256_GCM, 256)
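/*
 * Note: an MB_MGR instance is stateful (it owns the in-flight SIMD job
 * lanes) and is not safe to share between threads, so the engine keeps
 * one manager per VPP thread and every handler below operates on the
 * calling thread's ptd->mgr.
 */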
static void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail, u32 digest_size)
{
  vnet_crypto_op_t *op = job->user_data;
  u32 len = op->digest_len ? op->digest_len : digest_size;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
      return;
    }

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, len)))
        {
          *n_fail = *n_fail + 1;
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
          return;
        }
    }
  else if (len == digest_size)
    clib_memcpy_fast (op->digest, job->auth_tag_output, digest_size);
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, len);

  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}
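/*
 * A retired HMAC job either verifies or produces a digest: with
 * VNET_CRYPTO_OP_FLAG_HMAC_CHECK set, the freshly computed tag is compared
 * against op->digest (inbound verification); otherwise the tag is copied
 * out, truncated to op->digest_len when the caller wants fewer bytes than
 * the algorithm produces (e.g. 16 of SHA-256's 32 for HMAC-SHA-256-128).
 */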
static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                         u32 n_ops, u32 block_size, u32 hash_size,
                         u32 digest_size, JOB_HASH_ALG alg)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][digest_size];

  /* queue all the jobs first ... */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      u8 *kd = (u8 *) imbm->key_data[op->key_index];

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = digest_size;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = NULL_CIPHER;
      job->cipher_direction = DECRYPT;
      job->chain_order = HASH_CIPHER;

      job->u.HMAC._hashed_auth_key_xor_ipad = kd;
      job->u.HMAC._hashed_auth_key_xor_opad = kd + hash_size;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      /* ... retiring whatever the manager hands back as we go */
      if (job)
        ipsecmb_retire_hmac_job (job, &n_fail, digest_size);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_hmac_job (job, &n_fail, digest_size);

  return n_ops - n_fail;
}
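/*
 * A note on the submit/flush pattern above (also used by the CBC path
 * below): IMB_SUBMIT_JOB () returns either NULL, meaning the manager is
 * still filling its SIMD lanes, or a pointer to some previously submitted
 * job that has completed, not necessarily the one just handed in, so each
 * non-NULL return is retired on the spot.  Once all ops are queued,
 * IMB_FLUSH_JOB () drains the remaining lanes one completed job per call
 * until it returns NULL.
 */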
#define _(a, b, c, d, e, f)                                             \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
                      vnet_crypto_op_t * ops[],                         \
                      u32 n_ops)                                        \
{ return ipsecmb_ops_hmac_inline (vm, ops, n_ops, d, e, f, b); }

foreach_ipsecmb_hmac_op;
#undef _

static void
ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}
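/*
 * Cipher-only jobs carry no tag to verify, so retiring them just maps the
 * engine status; VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC is reused here as
 * the generic failure status for cipher ops as well.
 */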
static_always_inline u32
ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                               u32 n_ops, u32 key_len,
                               JOB_CIPHER_DIRECTION direction)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,
                                                     vm->thread_index);
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;

  for (i = 0; i < n_ops; i++)
    {
      ipsecmb_aes_cbc_key_data_t *kd;
      vnet_crypto_op_t *op = ops[i];
      kd = (ipsecmb_aes_cbc_key_data_t *) imbm->key_data[op->key_index];
      __m128i iv;

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = NULL_HASH;
      job->cipher_mode = CBC;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
        {
          iv = ptd->cbc_iv;
          _mm_storeu_si128 ((__m128i *) op->iv, iv);
          ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
        }

      job->aes_key_len_in_bytes = key_len / 8;
      job->aes_enc_key_expanded = kd->enc_key_exp;
      job->aes_dec_key_expanded = kd->dec_key_exp;
      job->iv = op->iv;
      job->iv_len_in_bytes = AES_BLOCK_SIZE;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
        ipsecmb_retire_cipher_job (job, &n_fail);
    }

  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    ipsecmb_retire_cipher_job (job, &n_fail);

  return n_ops - n_fail;
}
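/*
 * IV generation: each thread's cbc_iv is seeded once from /dev/urandom
 * (see crypto_ipsecmb_iv_init below) and advanced by one AES round
 * (_mm_aesenc_si128) per INIT_IV request, giving unpredictable per-packet
 * IVs without a syscall on the data path.
 *
 * For illustration only (hypothetical caller, field names as consumed
 * above), an encrypt op arrives shaped roughly like:
 *
 *   vnet_crypto_op_t op;
 *   vnet_crypto_op_init (&op, VNET_CRYPTO_OP_AES_128_CBC_ENC);
 *   op.src = op.dst = payload;      // in-place, multiple of 16 bytes
 *   op.len = payload_len;
 *   op.iv = iv_space;               // written by this engine when set
 *   op.key_index = key_index;       // from a prior vnet_crypto_key_add ()
 *   op.flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
 */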
#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm,                            \
                                vnet_crypto_op_t * ops[],                    \
                                u32 n_ops)                                   \
{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, ENCRYPT); }      \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm,                            \
                                vnet_crypto_op_t * ops[],                    \
                                u32 n_ops)                                   \
{ return ipsecmb_ops_cbc_cipher_inline (vm, ops, n_ops, b, DECRYPT); }

foreach_ipsecmb_cbc_cipher_op;
#undef _

/*
 * GCM handlers: direct (single buffer) and chained (scatter-gather)
 * variants, stamped out per key size via the IMB_AES##b##_GCM_* calls.
 */
#define _(a, b)                                                              \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j;                                                                  \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT (m, kd, &ctx, op->iv, op->aad, op->aad_len);     \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_ENC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_ENC_FINALIZE (m, kd, &ctx, op->tag, op->tag_len);     \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i;                                                                     \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_ENC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, op->tag, op->tag_len);     \
                                                                             \
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                          \
    }                                                                        \
                                                                             \
  return n_ops;                                                              \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a##_chained (vlib_main_t * vm,                  \
    vnet_crypto_op_t * ops[], vnet_crypto_op_chunk_t *chunks, u32 n_ops)     \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  vnet_crypto_op_chunk_t *chp;                                               \
  u32 i, j, n_failed = 0;                                                    \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      ASSERT (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS);              \
      IMB_AES##b##_GCM_INIT (m, kd, &ctx, op->iv, op->aad, op->aad_len);     \
      chp = chunks + op->chunk_index;                                        \
      for (j = 0; j < op->n_chunks; j++)                                     \
        {                                                                    \
          IMB_AES##b##_GCM_DEC_UPDATE (m, kd, &ctx, chp->dst, chp->src,      \
                                       chp->len);                            \
          chp += 1;                                                          \
        }                                                                    \
      IMB_AES##b##_GCM_DEC_FINALIZE (m, kd, &ctx, scratch, op->tag_len);     \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}                                                                            \
                                                                             \
static_always_inline u32                                                     \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],  \
                                u32 n_ops)                                   \
{                                                                            \
  ipsecmb_main_t *imbm = &ipsecmb_main;                                      \
  ipsecmb_per_thread_data_t *ptd = vec_elt_at_index (imbm->per_thread_data,  \
                                                     vm->thread_index);      \
  MB_MGR *m = ptd->mgr;                                                      \
  u32 i, n_failed = 0;                                                       \
                                                                             \
  for (i = 0; i < n_ops; i++)                                                \
    {                                                                        \
      struct gcm_key_data *kd;                                               \
      struct gcm_context_data ctx;                                           \
      vnet_crypto_op_t *op = ops[i];                                         \
      u8 scratch[64];                                                        \
                                                                             \
      kd = (struct gcm_key_data *) imbm->key_data[op->key_index];            \
      IMB_AES##b##_GCM_DEC (m, kd, &ctx, op->dst, op->src, op->len, op->iv,  \
                            op->aad, op->aad_len, scratch, op->tag_len);     \
                                                                             \
      if ((memcmp (op->tag, scratch, op->tag_len)))                          \
        {                                                                    \
          op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;                  \
          n_failed++;                                                        \
        }                                                                    \
      else                                                                   \
        op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;                        \
    }                                                                        \
                                                                             \
  return n_ops - n_failed;                                                   \
}

foreach_ipsecmb_gcm_cipher_op;
#undef _

static clib_error_t *
crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
{
  ipsecmb_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return (clib_error_return_unix (0, "failed to open '/dev/urandom'"));

  vec_foreach (ptd, imbm->per_thread_data)
  {
    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) !=
        sizeof (ptd->cbc_iv))
      {
        err = clib_error_return_unix (0, "'/dev/urandom' read failure");
        close (fd);
        return (err);
      }
  }

  close (fd);
  return (NULL);
}
static void
crypto_ipsecmb_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                            vnet_crypto_key_index_t idx)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  ipsecmb_alg_data_t *ad = imbm->alg_data + key->alg;
  u32 i;
  void *kd;

  /** TODO: add linked alg support **/
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (imbm->key_data))
        return;

      if (imbm->key_data[idx] == 0)
        return;

      clib_mem_free_s (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
      return;
    }

  if (ad->data_size == 0)
    return;

  vec_validate_aligned (imbm->key_data, idx, CLIB_CACHE_LINE_BYTES);

  if (kop == VNET_CRYPTO_KEY_OP_MODIFY && imbm->key_data[idx])
    {
      clib_mem_free_s (imbm->key_data[idx]);
      imbm->key_data[idx] = 0;
    }

  kd = imbm->key_data[idx] = clib_mem_alloc_aligned (ad->data_size,
                                                     CLIB_CACHE_LINE_BYTES);

  /* AES CBC key expansion */
  if (ad->keyexp)
    {
      ad->keyexp (key->data, ((ipsecmb_aes_cbc_key_data_t *) kd)->enc_key_exp,
                  ((ipsecmb_aes_cbc_key_data_t *) kd)->dec_key_exp);
      return;
    }

  /* AES GCM */
  if (ad->aes_gcm_pre)
    {
      ad->aes_gcm_pre (key->data, (struct gcm_key_data *) kd);
      return;
    }

  /* HMAC */
  if (ad->hash_one_block)
    {
      const int block_qw = HMAC_MAX_BLOCK_SIZE / sizeof (u64);
      u64 pad[block_qw], key_hash[block_qw];

      clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
      if (vec_len (key->data) <= ad->block_size)
        clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
      else
        ad->hash_fn (key->data, vec_len (key->data), key_hash);

      /* ipad */
      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x3636363636363636;
      ad->hash_one_block (pad, kd);

      /* opad */
      for (i = 0; i < block_qw; i++)
        pad[i] = key_hash[i] ^ 0x5c5c5c5c5c5c5c5c;
      ad->hash_one_block (pad, ((u8 *) kd) + (ad->data_size / 2));

      return;
    }
}
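/*
 * Background on the precomputation above: standard HMAC is
 *
 *   HMAC (K, m) = H ((K' ^ opad) || H ((K' ^ ipad) || m))
 *
 * with K' the key zero-padded (or first hashed, if longer than a block)
 * to the block size, ipad = 0x36 repeated and opad = 0x5c repeated.
 * hash_one_block () stores the hash mid-states of (K' ^ ipad) and
 * (K' ^ opad) back to back in kd, which is what the per-packet jobs
 * reference via _hashed_auth_key_xor_ipad / _opad, so the key is never
 * rehashed on the data path.
 */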
static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_alg_data_t *ad;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  MB_MGR *m = 0;
  u32 eidx;
  u8 *name;

  if (!clib_cpu_supports_aes ())
    return 0;

  /*
   * A priority that is better than the OpenSSL implementation but
   * worse than VPP's native implementation.
   */
  name = format (0, "Intel(R) Multi-Buffer Crypto for IPsec Library %s%c",
                 IMB_VERSION_STR, 0);
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80, (char *) name);

  vec_validate_aligned (imbm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  /* pick the fastest MB_MGR code path the CPU supports */
  vec_foreach (ptd, imbm->per_thread_data)
    {
      ptd->mgr = alloc_mb_mgr (0);
      if (clib_cpu_supports_avx512f ())
        init_mb_mgr_avx512 (ptd->mgr);
      else if (clib_cpu_supports_avx2 ())
        init_mb_mgr_avx2 (ptd->mgr);
      else
        init_mb_mgr_sse (ptd->mgr);

      if (ptd == imbm->per_thread_data)
        m = ptd->mgr;
    }

  if ((error = crypto_ipsecmb_iv_init (imbm)))
    return (error);
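  /*
   * The registrations below also populate imbm->alg_data: data_size
   * drives the per-key allocation in crypto_ipsecmb_key_handler (), and
   * exactly one of keyexp / aes_gcm_pre / hash_one_block is set per
   * algorithm to select how that key material is precomputed.
   */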
#define _(a, b, c, d, e, f)                                              \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
                                    ipsecmb_ops_hmac_##a);               \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_HMAC_##a;                        \
  ad->block_size = d;                                                    \
  ad->data_size = e * 2;                                                 \
  ad->hash_one_block = m->c##_one_block;                                 \
  ad->hash_fn = m->c;

  foreach_ipsecmb_hmac_op;
#undef _

#define _(a, b)                                                          \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,  \
                                    ipsecmb_ops_cbc_cipher_enc_##a);     \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,  \
                                    ipsecmb_ops_cbc_cipher_dec_##a);     \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                             \
  ad->data_size = sizeof (ipsecmb_aes_cbc_key_data_t);                   \
  ad->keyexp = m->keyexp_##b;

  foreach_ipsecmb_cbc_cipher_op;
#undef _

#define _(a, b)                                                          \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,  \
                                    ipsecmb_ops_gcm_cipher_enc_##a);     \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,  \
                                    ipsecmb_ops_gcm_cipher_dec_##a);     \
  vnet_crypto_register_chained_ops_handler                               \
      (vm, eidx, VNET_CRYPTO_OP_##a##_ENC,                               \
       ipsecmb_ops_gcm_cipher_enc_##a##_chained);                        \
  vnet_crypto_register_chained_ops_handler                               \
      (vm, eidx, VNET_CRYPTO_OP_##a##_DEC,                               \
       ipsecmb_ops_gcm_cipher_dec_##a##_chained);                        \
  ad = imbm->alg_data + VNET_CRYPTO_ALG_##a;                             \
  ad->data_size = sizeof (struct gcm_key_data);                          \
  ad->aes_gcm_pre = m->gcm##b##_pre;

  foreach_ipsecmb_gcm_cipher_op;
#undef _

  vnet_crypto_register_key_handler (vm, eidx, crypto_ipsecmb_key_handler);
  return (NULL);
}

VLIB_INIT_FUNCTION (crypto_ipsecmb_init) =
{
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC Multi-buffer Crypto Engine",
};
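/*
 * Once the plugin is loaded, registration can be checked from the VPP
 * CLI; the output shape below is illustrative only:
 *
 *   vpp# show crypto engines
 *   Name      Prio  Description
 *   ipsecmb   80    Intel(R) Multi-Buffer Crypto for IPsec Library ...
 */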