/*
 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
 *
 * Copyright (c) 2019 Cisco Systems
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>

#include <intel-ipsec-mb.h>

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/crypto/crypto.h>
#include <vppinfra/cpu.h>

typedef struct
{
  MB_MGR *mgr;
  __m128i cbc_iv;
} ipsecmb_per_thread_data_t;

typedef struct ipsecmb_main_t_
{
  ipsecmb_per_thread_data_t *per_thread_data;
} ipsecmb_main_t;

/**
 * AES GCM key-expansion VFT
 */
typedef void (*ase_gcm_pre_t) (const void *key,
			       struct gcm_key_data * key_data);

typedef struct ipsecmb_gcm_pre_vft_t_
{
  ase_gcm_pre_t ase_gcm_pre_128;
  ase_gcm_pre_t ase_gcm_pre_192;
  ase_gcm_pre_t ase_gcm_pre_256;
} ipsecmb_gcm_pre_vft_t;

static ipsecmb_gcm_pre_vft_t ipsecmb_gcm_pre_vft;

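/* The VFT above is bound at init time to the architecture-specific
 * AES-GCM key-expansion routines provided by intel-ipsec-mb (SSE,
 * AVX2 or AVX512 variants), via the macro below. */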
#define INIT_IPSEC_MB_GCM_PRE(_arch)                                    \
  ipsecmb_gcm_pre_vft.ase_gcm_pre_128 = aes_gcm_pre_128_##_arch;        \
  ipsecmb_gcm_pre_vft.ase_gcm_pre_192 = aes_gcm_pre_192_##_arch;        \
  ipsecmb_gcm_pre_vft.ase_gcm_pre_256 = aes_gcm_pre_256_##_arch;

static ipsecmb_main_t ipsecmb_main;

#define foreach_ipsecmb_hmac_op                \
  _(SHA1, SHA1, sha1)                          \
  _(SHA256, SHA_256, sha256)                   \
  _(SHA384, SHA_384, sha384)                   \
  _(SHA512, SHA_512, sha512)

/*
 * (Alg, key-len-bits, key-len-bytes, iv-len-bytes)
 */
#define foreach_ipsecmb_cbc_cipher_op          \
  _(AES_128_CBC, 128, 16, 16)                  \
  _(AES_192_CBC, 192, 24, 16)                  \
  _(AES_256_CBC, 256, 32, 16)

/*
 * (Alg, key-len-bits, key-len-bytes, iv-len-bytes)
 */
#define foreach_ipsecmb_gcm_cipher_op          \
  _(AES_128_GCM, 128, 16, 12)                  \
  _(AES_192_GCM, 192, 24, 12)                  \
  _(AES_256_GCM, 256, 32, 12)

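/* Pre-compute the HMAC inner/outer pads (RFC 2104): XOR the key with
 * the 0x36/0x5c constants and run a single hash block over each, so
 * per-packet jobs can start from the hashed pads directly. */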
always_inline void
hash_expand_keys (const MB_MGR * mgr,
		  const u8 * key,
		  u32 length,
		  u8 block_size,
		  u8 ipad[256], u8 opad[256], hash_one_block_t fn)
{
  u8 buf[block_size];
  int i = 0;

  if (length > block_size)
    {
      return;
    }

  memset (buf, 0x36, sizeof (buf));
  for (i = 0; i < length; i++)
    {
      buf[i] ^= key[i];
    }
  fn (buf, ipad);

  memset (buf, 0x5c, sizeof (buf));

  for (i = 0; i < length; i++)
    {
      buf[i] ^= key[i];
    }
  fn (buf, opad);
}

always_inline void
ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
    {
      if ((memcmp (op->digest, job->auth_tag_output, op->digest_len)))
	{
	  *n_fail = *n_fail + 1;
	  op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
	}
    }
  else
    clib_memcpy_fast (op->digest, job->auth_tag_output, op->digest_len);
}
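
/* All the ops handlers below follow the same multi-buffer pattern:
 * submit every op as a job (IMB_SUBMIT_JOB retires an earlier job once
 * enough are queued to fill the SIMD lanes), then flush the manager to
 * force completion of whatever is still in flight. */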

static_always_inline u32
ipsecmb_ops_hmac_inline (vlib_main_t * vm,
			 const ipsecmb_per_thread_data_t * ptd,
			 vnet_crypto_op_t * ops[],
			 u32 n_ops,
			 u32 block_size,
			 hash_one_block_t fn, JOB_HASH_ALG alg)
{
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][64];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      vnet_crypto_op_t *op = ops[i];
      u8 ipad[256], opad[256];

      hash_expand_keys (ptd->mgr, op->key, op->key_len,
			block_size, ipad, opad, fn);

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->hash_start_src_offset_in_bytes = 0;
      job->msg_len_to_hash_in_bytes = op->len;
      job->hash_alg = alg;
      job->auth_tag_output_len_in_bytes = op->digest_len;
      job->auth_tag_output = scratch[i];

      job->cipher_mode = NULL_CIPHER;
      job->cipher_direction = DECRYPT;
      job->chain_order = HASH_CIPHER;

      job->aes_key_len_in_bytes = op->key_len;

      job->u.HMAC._hashed_auth_key_xor_ipad = ipad;
      job->u.HMAC._hashed_auth_key_xor_opad = opad;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
	ipsecmb_retire_hmac_job (job, &n_fail);
    }

  /*
   * ... then flush (i.e. complete) them.
   * We will have queued enough to satisfy the 'multi' buffer.
   */
  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    {
      ipsecmb_retire_hmac_job (job, &n_fail);
    }

  return n_ops - n_fail;
}

#define _(a, b, c)                                                      \
static_always_inline u32                                                \
ipsecmb_ops_hmac_##a (vlib_main_t * vm,                                 \
		      vnet_crypto_op_t * ops[],                         \
		      u32 n_ops)                                        \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_hmac_inline (vm, ptd, ops, n_ops,                  \
				  b##_BLOCK_SIZE,                       \
				  ptd->mgr->c##_one_block,              \
				  b);                                   \
}
foreach_ipsecmb_hmac_op;
#undef _

#define EXPANDED_KEY_N_BYTES (16 * 15)
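/* Room for 15 x 16-byte round keys: AES-256 uses 14 rounds, i.e. 15
 * round keys including the initial AddRoundKey, so this covers all
 * supported key sizes. */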

always_inline void
ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
}

static_always_inline u32
ipsecmb_ops_cbc_cipher_inline (vlib_main_t * vm,
			       ipsecmb_per_thread_data_t * ptd,
			       vnet_crypto_op_t * ops[],
			       u32 n_ops, u32 key_len, u32 iv_len,
			       keyexp_t fn, JOB_CIPHER_DIRECTION direction)
{
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      u8 aes_enc_key_expanded[EXPANDED_KEY_N_BYTES];
      u8 aes_dec_key_expanded[EXPANDED_KEY_N_BYTES];
      vnet_crypto_op_t *op = ops[i];
      __m128i iv;

      fn (op->key, aes_enc_key_expanded, aes_dec_key_expanded);

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = NULL_HASH;
      job->cipher_mode = CBC;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

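      /* For encryption with INIT_IV set, generate a fresh unpredictable
       * IV by running one AES round over the previous per-thread IV
       * (seeded from /dev/urandom in crypto_ipsecmb_iv_init below). */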
      if ((direction == ENCRYPT) && (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV))
	{
	  iv = ptd->cbc_iv;
	  _mm_storeu_si128 ((__m128i *) op->iv, iv);
	  ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
	}

      job->aes_key_len_in_bytes = key_len;
      job->aes_enc_key_expanded = aes_enc_key_expanded;
      job->aes_dec_key_expanded = aes_dec_key_expanded;
      job->iv = op->iv;
      job->iv_len_in_bytes = iv_len;

      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
	ipsecmb_retire_cipher_job (job, &n_fail);
    }

  /*
   * ... then flush (i.e. complete) them.
   * We will have queued enough to satisfy the 'multi' buffer.
   */
  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    {
      ipsecmb_retire_cipher_job (job, &n_fail);
    }

  return n_ops - n_fail;
}

#define _(a, b, c, d)                                                   \
static_always_inline u32                                                \
ipsecmb_ops_cbc_cipher_enc_##a (vlib_main_t * vm,                       \
				vnet_crypto_op_t * ops[],               \
				u32 n_ops)                              \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, c, d,      \
					ptd->mgr->keyexp_##b,           \
					ENCRYPT);                       \
}
foreach_ipsecmb_cbc_cipher_op;
#undef _

#define _(a, b, c, d)                                                   \
static_always_inline u32                                                \
ipsecmb_ops_cbc_cipher_dec_##a (vlib_main_t * vm,                       \
				vnet_crypto_op_t * ops[],               \
				u32 n_ops)                              \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_cbc_cipher_inline (vm, ptd, ops, n_ops, c, d,      \
					ptd->mgr->keyexp_##b,           \
					DECRYPT);                       \
}
foreach_ipsecmb_cbc_cipher_op;
#undef _

always_inline void
ipsecmb_retire_gcm_cipher_job (JOB_AES_HMAC * job,
			       u32 * n_fail, JOB_CIPHER_DIRECTION direction)
{
  vnet_crypto_op_t *op = job->user_data;

  if (STS_COMPLETED != job->status)
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      *n_fail = *n_fail + 1;
    }
  else
    op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (DECRYPT == direction)
    {
      if ((memcmp (op->tag, job->auth_tag_output, op->tag_len)))
	{
	  *n_fail = *n_fail + 1;
	  op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
	}
    }
}
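
/* AES-GCM uses a 12-byte nonce built from the 4-byte salt followed by
 * the 8-byte per-packet IV (the construction used by RFC 4106 for
 * ESP).  On decrypt, the computed tag is written to a scratch buffer
 * and compared against op->tag in ipsecmb_retire_gcm_cipher_job
 * above. */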

static_always_inline u32
ipsecmb_ops_gcm_cipher_inline (vlib_main_t * vm,
			       ipsecmb_per_thread_data_t * ptd,
			       vnet_crypto_op_t * ops[],
			       u32 n_ops, u32 key_len, u32 iv_len,
			       ase_gcm_pre_t fn,
			       JOB_CIPHER_DIRECTION direction)
{
  JOB_AES_HMAC *job;
  u32 i, n_fail = 0;
  u8 scratch[n_ops][64];

  /*
   * queue all the jobs first ...
   */
  for (i = 0; i < n_ops; i++)
    {
      struct gcm_key_data key_data;
      vnet_crypto_op_t *op = ops[i];
      u32 nonce[3];
      __m128i iv;

      fn (op->key, &key_data);

      job = IMB_GET_NEXT_JOB (ptd->mgr);

      job->src = op->src;
      job->dst = op->dst;
      job->msg_len_to_cipher_in_bytes = op->len;
      job->cipher_start_src_offset_in_bytes = 0;

      job->hash_alg = AES_GMAC;
      job->cipher_mode = GCM;
      job->cipher_direction = direction;
      job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);

      if (direction == ENCRYPT)
	{
	  if (op->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
	    {
	      iv = ptd->cbc_iv;
	      /* only use 8 bytes of the IV */
	      clib_memcpy_fast (op->iv, &iv, 8);
	      ptd->cbc_iv = _mm_aesenc_si128 (iv, iv);
	    }
	  nonce[0] = op->salt;
	  clib_memcpy_fast (nonce + 1, op->iv, 8);
	  job->iv = (u8 *) nonce;
	}
      else
	{
	  nonce[0] = op->salt;
	  clib_memcpy_fast (nonce + 1, op->iv, 8);
	  job->iv = op->iv;
	}

      job->aes_key_len_in_bytes = key_len;
      job->aes_enc_key_expanded = &key_data;
      job->aes_dec_key_expanded = &key_data;
      job->iv_len_in_bytes = iv_len;

      job->u.GCM.aad = op->aad;
      job->u.GCM.aad_len_in_bytes = op->aad_len;
      job->auth_tag_output_len_in_bytes = op->tag_len;
      if (DECRYPT == direction)
	job->auth_tag_output = scratch[i];
      else
	job->auth_tag_output = op->tag;
      job->user_data = op;

      job = IMB_SUBMIT_JOB (ptd->mgr);

      if (job)
	ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
    }

  /*
   * ... then flush (i.e. complete) them.
   * We will have queued enough to satisfy the 'multi' buffer.
   */
  while ((job = IMB_FLUSH_JOB (ptd->mgr)))
    {
      ipsecmb_retire_gcm_cipher_job (job, &n_fail, direction);
    }

  return n_ops - n_fail;
}

#define _(a, b, c, d)                                                   \
static_always_inline u32                                                \
ipsecmb_ops_gcm_cipher_enc_##a (vlib_main_t * vm,                       \
				vnet_crypto_op_t * ops[],               \
				u32 n_ops)                              \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_gcm_cipher_inline (vm, ptd, ops, n_ops, c, d,      \
					ipsecmb_gcm_pre_vft.ase_gcm_pre_##b, \
					ENCRYPT);                       \
}
foreach_ipsecmb_gcm_cipher_op;
#undef _

#define _(a, b, c, d)                                                   \
static_always_inline u32                                                \
ipsecmb_ops_gcm_cipher_dec_##a (vlib_main_t * vm,                       \
				vnet_crypto_op_t * ops[],               \
				u32 n_ops)                              \
{                                                                       \
  ipsecmb_per_thread_data_t *ptd;                                       \
  ipsecmb_main_t *imbm;                                                 \
                                                                        \
  imbm = &ipsecmb_main;                                                 \
  ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index);     \
                                                                        \
  return ipsecmb_ops_gcm_cipher_inline (vm, ptd, ops, n_ops, c, d,      \
					ipsecmb_gcm_pre_vft.ase_gcm_pre_##b, \
					DECRYPT);                       \
}
foreach_ipsecmb_gcm_cipher_op;
#undef _

clib_error_t *
crypto_ipsecmb_iv_init (ipsecmb_main_t * imbm)
{
  ipsecmb_per_thread_data_t *ptd;
  clib_error_t *err = 0;
  int fd;

  if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
    return clib_error_return_unix (0, "failed to open '/dev/urandom'");

  vec_foreach (ptd, imbm->per_thread_data)
  {
    if (read (fd, &ptd->cbc_iv, sizeof (ptd->cbc_iv)) != sizeof (ptd->cbc_iv))
      {
	err = clib_error_return_unix (0, "'/dev/urandom' read failure");
	close (fd);
	return (err);
      }
  }

  close (fd);
  return (NULL);
}

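/* Plugin init: probe CPU features once and set up a multi-buffer
 * manager per worker thread using the widest SIMD code path available
 * (AVX512, AVX2, or SSE). */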
static clib_error_t *
crypto_ipsecmb_init (vlib_main_t * vm)
{
  ipsecmb_main_t *imbm = &ipsecmb_main;
  ipsecmb_per_thread_data_t *ptd;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  u32 eidx;

  if ((error = vlib_call_init_function (vm, vnet_crypto_init)))
    return error;

  if (!clib_cpu_supports_aes ())
    return 0;

  /*
   * A priority that is better than OpenSSL but worse than VPP native
   */
  eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80,
				      "Intel IPSEC multi-buffer");

  vec_validate (imbm->per_thread_data, tm->n_vlib_mains - 1);

  if (clib_cpu_supports_avx512f ())
    {
      vec_foreach (ptd, imbm->per_thread_data)
      {
	ptd->mgr = alloc_mb_mgr (0);
	init_mb_mgr_avx512 (ptd->mgr);
	INIT_IPSEC_MB_GCM_PRE (avx_gen4);
      }
    }
  else if (clib_cpu_supports_avx2 ())
    {
      vec_foreach (ptd, imbm->per_thread_data)
      {
	ptd->mgr = alloc_mb_mgr (0);
	init_mb_mgr_avx2 (ptd->mgr);
	INIT_IPSEC_MB_GCM_PRE (avx_gen2);
      }
    }
  else
    {
      vec_foreach (ptd, imbm->per_thread_data)
      {
	ptd->mgr = alloc_mb_mgr (0);
	init_mb_mgr_sse (ptd->mgr);
	INIT_IPSEC_MB_GCM_PRE (sse);
      }
    }

  if (clib_cpu_supports_x86_aes () && (error = crypto_ipsecmb_iv_init (imbm)))
    return (error);

#define _(a, b, c)                                                      \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
				    ipsecmb_ops_hmac_##a);
  foreach_ipsecmb_hmac_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
				    ipsecmb_ops_cbc_cipher_enc_##a);
  foreach_ipsecmb_cbc_cipher_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
				    ipsecmb_ops_cbc_cipher_dec_##a);
  foreach_ipsecmb_cbc_cipher_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
				    ipsecmb_ops_gcm_cipher_enc_##a);
  foreach_ipsecmb_gcm_cipher_op;
#undef _
#define _(a, b, c, d)                                                   \
  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
				    ipsecmb_ops_gcm_cipher_dec_##a);
  foreach_ipsecmb_gcm_cipher_op;
#undef _

  return (NULL);
}

VLIB_INIT_FUNCTION (crypto_ipsecmb_init);

/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Intel IPSEC Multi-buffer Crypto Engine",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */