FD.io VPP v21.10.1-2-g0a485f517
Vector Packet Processing
cryptodev_raw_data_path.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
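
/* Compute the rte_crypto_sym_ofs for a linked cipher + auth operation.
 * The raw data path is handed one contiguous region [*min_ofs, *max_end)
 * covering both the cipher and the integrity spans; the returned union
 * encodes how many bytes at the head and tail of that region each
 * algorithm must skip.
 *
 * Worked example (illustrative values only): crypto_start_offset = 16,
 * crypto_total_length = 100, integ_start_offset = 8 and integ_length_adj =
 * 12 give crypto_end = 116 and integ_end = 120, hence *min_ofs = 8,
 * *max_end = 120, cipher.head = 8, cipher.tail = 4, auth.head = 0 and
 * auth.tail = 0. */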
static_always_inline u64
compute_ofs_linked_alg (vnet_crypto_async_frame_elt_t *fe, i16 *min_ofs,
                        u32 *max_end)
{
  union rte_crypto_sym_ofs ofs;
  u32 crypto_end = fe->crypto_start_offset + fe->crypto_total_length;
  u32 integ_end =
    fe->integ_start_offset + fe->crypto_total_length + fe->integ_length_adj;

  *min_ofs = clib_min (fe->crypto_start_offset, fe->integ_start_offset);
  *max_end = clib_max (crypto_end, integ_end);

  ofs.ofs.cipher.head = fe->crypto_start_offset - *min_ofs;
  ofs.ofs.cipher.tail = *max_end - crypto_end;
  ofs.ofs.auth.head = fe->integ_start_offset - *min_ofs;
  ofs.ofs.auth.tail = *max_end - integ_end;

  return ofs.raw;
}

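/* Build the scatter-gather list for a chained vlib buffer. data_vec[0] is
 * already filled in by the caller; this walks the chain appending one
 * rte_crypto_vec per follow-on buffer until `size` bytes are covered,
 * storing virtual or physical addresses to match the IOVA mode. Fails if
 * the chain would need more than CRYPTODEV_MAX_N_SGL segments or holds
 * fewer than `size` bytes. */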
static_always_inline int
cryptodev_frame_build_sgl (vlib_main_t *vm, enum rte_iova_mode iova_mode,
                           struct rte_crypto_vec *data_vec, u16 *n_seg,
                           vlib_buffer_t *b, u32 size)
{
  struct rte_crypto_vec *vec = data_vec + 1;
  if (vlib_buffer_chain_linearize (vm, b) > CRYPTODEV_MAX_N_SGL)
    return -1;

  while ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && size)
    {
      u32 len;
      b = vlib_get_buffer (vm, b->next_buffer);
      len = clib_min (b->current_length, size);
      vec->base = (void *) vlib_buffer_get_current (b);
      if (iova_mode == RTE_IOVA_VA)
        vec->iova = pointer_to_uword (vec->base);
      else
        vec->iova = vlib_buffer_get_current_pa (vm, b);
      vec->len = len;
      size -= len;
      vec++;
      *n_seg += 1;
    }

  if (size)
    return -1;

  return 0;
}

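/* Restore the raw data-path context to a known state after a failed enqueue
 * or dequeue, reusing the session previously stashed in cet->reset_sess. */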
static_always_inline void
cryptodev_reset_ctx (cryptodev_engine_thread_t *cet)
{
  union rte_cryptodev_session_ctx sess_ctx;

  ASSERT (cet->reset_sess != 0);

  sess_ctx.crypto_sess = cet->reset_sess;

  rte_cryptodev_configure_raw_dp_ctx (cet->cryptodev_id, cet->cryptodev_q,
                                      cet->ctx, RTE_CRYPTO_OP_WITH_SESSION,
                                      sess_ctx, 0);
}

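/* Enqueue a frame of linked cipher + auth operations (e.g. AES-CBC plus an
 * HMAC) on this thread's raw data-path context. The context is reconfigured
 * whenever the key index changes between elements; each job is pushed with
 * rte_cryptodev_raw_enqueue () and the whole batch is committed once with
 * rte_cryptodev_raw_enqueue_done (). */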
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
                                     vnet_crypto_async_frame_t *frame,
                                     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec;
  u32 n_elts;
  u32 last_key_index = ~0;
  i16 min_ofs;
  u32 max_end;
  u8 is_update = 0;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  b = cet->b;
  fe = frame->elts;

  while (n_elts)
    {
      union rte_crypto_sym_ofs cofs;
      u16 n_seg = 1;

      if (n_elts > 2)
        {
          clib_prefetch_load (&fe[1]);
          clib_prefetch_load (&fe[2]);
          vlib_prefetch_buffer_header (b[1], LOAD);
          vlib_prefetch_buffer_header (b[2], LOAD);
        }

      if (PREDICT_FALSE (last_key_index != fe->key_index))
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
          union rte_cryptodev_session_ctx sess_ctx;

          if (PREDICT_FALSE (key->keys[vm->numa_node][op_type] == 0))
            {
              status = cryptodev_session_create (vm, fe->key_index, 0);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          /* Borrow a created session to reset the session ctx, on the valid
           * assumption that no session reset happens until the first valid
           * packet is processed */
          if (PREDICT_FALSE (cet->reset_sess == 0))
            cet->reset_sess = key->keys[vm->numa_node][op_type];

          sess_ctx.crypto_sess = key->keys[vm->numa_node][op_type];

          status = rte_cryptodev_configure_raw_dp_ctx (
            cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
            RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, is_update);
          if (PREDICT_FALSE (status < 0))
            goto error_exit;

          last_key_index = fe->key_index;
          is_update = 1;
        }

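      /* Describe the job as one contiguous region covering both the cipher
       * and the integrity spans; chained buffers are handed over as a
       * scatter-gather list built by cryptodev_frame_build_sgl (). */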
      cofs.raw = compute_ofs_linked_alg (fe, &min_ofs, &max_end);

      vec->len = max_end - min_ofs;
      if (cmt->iova_mode == RTE_IOVA_VA)
        {
          vec[0].base = (void *) (b[0]->data + min_ofs);
          vec[0].iova = pointer_to_uword (b[0]->data) + min_ofs;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = pointer_to_uword (fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = pointer_to_uword (fe->tag);
        }
      else
        {
          vec[0].base = (void *) (b[0]->data + min_ofs);
          vec[0].iova = vlib_buffer_get_pa (vm, b[0]) + min_ofs;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = vlib_physmem_get_pa (vm, fe->digest);
        }

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        {
          vec[0].len = b[0]->current_data + b[0]->current_length - min_ofs;
          if (cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
                                         max_end - min_ofs - vec->len) < 0)
            goto error_exit;
        }

      status = rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
                                          &digest_vec, 0, (void *) frame);
      if (PREDICT_FALSE (status < 0))
        goto error_exit;

      b++;
      fe++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    {
      cryptodev_reset_ctx (cet);
      return -1;
    }

  cet->inflight += frame->n_elts;
  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
                                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}

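/* Enqueue a frame of AEAD (e.g. AES-GCM) operations. In addition to the
 * linked-alg path, every job carries AAD, which is staged in a per-thread
 * IOVA-contiguous buffer, and the session is recreated whenever its
 * recorded AAD length (kept in opaque_data) no longer matches the frame's
 * aad_len. */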
static_always_inline int
cryptodev_raw_aead_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame,
                            cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  vlib_buffer_t **b;
  u32 n_elts;
  union rte_crypto_sym_ofs cofs;
  struct rte_crypto_vec vec[CRYPTODEV_MAX_N_SGL];
  struct rte_crypto_va_iova_ptr iv_vec, digest_vec, aad_vec;
  u32 last_key_index = ~0;
  u8 is_update = 0;
  int status;

  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  vlib_get_buffers (vm, frame->buffer_indices, cet->b, frame->n_elts);

  fe = frame->elts;
  b = cet->b;
  cofs.raw = 0;

  while (n_elts)
    {
      u32 aad_offset = ((cet->aad_index++) & CRYPTODEV_AAD_MASK) << 4;
      u16 n_seg = 1;

      if (n_elts > 1)
        {
          clib_prefetch_load (&fe[1]);
          vlib_prefetch_buffer_header (b[1], LOAD);
        }

      if (PREDICT_FALSE (last_key_index != fe->key_index))
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
          union rte_cryptodev_session_ctx sess_ctx;

          if (PREDICT_FALSE (key->keys[vm->numa_node][op_type] == 0))
            {
              status = cryptodev_session_create (vm, fe->key_index, aad_len);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          if (PREDICT_FALSE (
                (u8) key->keys[vm->numa_node][op_type]->opaque_data !=
                aad_len))
            {
              cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
                                      fe->key_index, aad_len);
              status = cryptodev_session_create (vm, fe->key_index, aad_len);
              if (PREDICT_FALSE (status < 0))
                goto error_exit;
            }

          /* Borrow a created session to reset the session ctx, on the valid
           * assumption that no session reset happens until the first valid
           * packet is processed */

          if (PREDICT_FALSE (cet->reset_sess == 0))
            cet->reset_sess = key->keys[vm->numa_node][op_type];

          sess_ctx.crypto_sess = key->keys[vm->numa_node][op_type];

          status = rte_cryptodev_configure_raw_dp_ctx (
            cet->cryptodev_id, cet->cryptodev_q, cet->ctx,
            RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, is_update);
          if (PREDICT_FALSE (status < 0))
            goto error_exit;

          last_key_index = fe->key_index;
          is_update = 1;
        }

      if (cmt->iova_mode == RTE_IOVA_VA)
        {
          vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
          vec[0].iova = pointer_to_uword (vec[0].base);
          vec[0].len = fe->crypto_total_length;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = pointer_to_uword (fe->iv);
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = pointer_to_uword (fe->tag);
          aad_vec.va = (void *) (cet->aad_buf + aad_offset);
          aad_vec.iova = cet->aad_phy_addr + aad_offset;
        }
      else
        {
          vec[0].base = (void *) (b[0]->data + fe->crypto_start_offset);
          vec[0].iova =
            vlib_buffer_get_pa (vm, b[0]) + fe->crypto_start_offset;
          vec[0].len = fe->crypto_total_length;
          iv_vec.va = (void *) fe->iv;
          iv_vec.iova = vlib_physmem_get_pa (vm, fe->iv);
          aad_vec.va = (void *) (cet->aad_buf + aad_offset);
          aad_vec.iova = cet->aad_phy_addr + aad_offset;
          digest_vec.va = (void *) fe->tag;
          digest_vec.iova = vlib_physmem_get_pa (vm, fe->tag);
        }

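      /* Stage the AAD in the per-thread buffer: aad_offset advances in
       * 16-byte slots (<< 4), so an 8- or 12-byte AAD always fits and has a
       * physical address the PMD can use. */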
      if (aad_len == 8)
        *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
      else
        {
          /* aad_len == 12 */
          *(u64 *) (cet->aad_buf + aad_offset) = *(u64 *) fe->aad;
          *(u32 *) (cet->aad_buf + aad_offset + 8) = *(u32 *) (fe->aad + 8);
        }

      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        {
          vec[0].len = b[0]->current_data + b[0]->current_length -
                       fe->crypto_start_offset;
          status =
            cryptodev_frame_build_sgl (vm, cmt->iova_mode, vec, &n_seg, b[0],
                                       fe->crypto_total_length - vec[0].len);
          if (status < 0)
            goto error_exit;
        }

      status =
        rte_cryptodev_raw_enqueue (cet->ctx, vec, n_seg, cofs, &iv_vec,
                                   &digest_vec, &aad_vec, (void *) frame);
      if (PREDICT_FALSE (status < 0))
        goto error_exit;

      fe++;
      b++;
      n_elts--;
    }

  status = rte_cryptodev_raw_enqueue_done (cet->ctx, frame->n_elts);
  if (PREDICT_FALSE (status < 0))
    goto error_exit;

  cet->inflight += frame->n_elts;

  return 0;

error_exit:
  cryptodev_mark_frame_err_status (frame,
                                   VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
  cryptodev_reset_ctx (cet);
  return -1;
}

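/* Callbacks handed to rte_cryptodev_raw_dequeue_burst (): the PMD queries
 * cryptodev_get_frame_n_elts () for the number of ops belonging to the
 * opaque user data (a vnet_crypto_async_frame_t) and reports each op's
 * status through cryptodev_post_dequeue (). */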
static_always_inline u32
cryptodev_get_frame_n_elts (void *frame)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;
  return f->n_elts;
}

static_always_inline void
cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
{
  vnet_crypto_async_frame_t *f = (vnet_crypto_async_frame_t *) frame;

  f->elts[index].status = is_op_success ? VNET_CRYPTO_OP_STATUS_COMPLETED :
                                          VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
}

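/* Peek at the pos-th object of an rte_ring without dequeuing it, relying on
 * the ring object table being laid out directly after the rte_ring
 * structure. */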
#define GET_RING_OBJ(r, pos, f)                                               \
  do                                                                          \
    {                                                                         \
      vnet_crypto_async_frame_t **ring = (void *) &r[1];                      \
      f = ring[(r->cons.head + pos) & r->mask];                               \
    }                                                                         \
  while (0)
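
/* Dequeue completed operations and return at most one fully completed
 * frame. Frames can complete only partially, so partially dequeued frames
 * are parked in cet->cached_frame and drained first on the next call. While
 * a frame sits in that ring, frame->state packs the bookkeeping: bits 0-6
 * hold the number of elements still pending, bit 7 is the sticky error
 * flag. */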
419 
421 cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
422  u32 *enqueue_thread_idx)
423 {
426  vnet_crypto_async_frame_t *frame, *frame_ret = 0;
427  u32 n_deq, n_success;
428  u32 n_cached_frame = rte_ring_count (cet->cached_frame), n_room_left;
429  u8 no_job_to_deq = 0;
430  u16 inflight = cet->inflight;
431  int dequeue_status;
432 
433  n_room_left = CRYPTODEV_DEQ_CACHE_SZ - n_cached_frame - 1;
434 
435  if (n_cached_frame)
436  {
437  u32 i;
438  for (i = 0; i < n_cached_frame; i++)
439  {
441  void *f_ret;
442  enum rte_crypto_op_status op_status;
443  u8 n_left, err, j;
444 
445  GET_RING_OBJ (cet->cached_frame, i, f);
446 
447  if (i < n_cached_frame - 2)
448  {
449  vnet_crypto_async_frame_t *f1, *f2;
450  GET_RING_OBJ (cet->cached_frame, i + 1, f1);
451  GET_RING_OBJ (cet->cached_frame, i + 2, f2);
452  clib_prefetch_load (f1);
453  clib_prefetch_load (f2);
454  }
455 
456  n_left = f->state & 0x7f;
457  err = f->state & 0x80;
458 
459  for (j = f->n_elts - n_left; j < f->n_elts && inflight; j++)
460  {
461  int ret;
462  f_ret = rte_cryptodev_raw_dequeue (cet->ctx, &ret, &op_status);
463 
464  if (!f_ret)
465  break;
466 
467  switch (op_status)
468  {
469  case RTE_CRYPTO_OP_STATUS_SUCCESS:
470  f->elts[j].status = VNET_CRYPTO_OP_STATUS_COMPLETED;
471  break;
472  default:
473  f->elts[j].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
474  err |= 1 << 7;
475  }
476 
477  inflight--;
478  }
479 
480  if (j == f->n_elts)
481  {
482  if (i == 0)
483  {
484  frame_ret = f;
485  f->state = err ? VNET_CRYPTO_FRAME_STATE_ELT_ERROR :
487  }
488  else
489  {
490  f->state = f->n_elts - j;
491  f->state |= err;
492  }
493  if (inflight)
494  continue;
495  }
496 
497  /* to here f is not completed dequeued and no more job can be
498  * dequeued
499  */
500  f->state = f->n_elts - j;
501  f->state |= err;
502  no_job_to_deq = 1;
503  break;
504  }
505 
506  if (frame_ret)
507  {
508  rte_ring_sc_dequeue (cet->cached_frame, (void **) &frame_ret);
509  n_room_left++;
510  }
511  }
512 
513  /* no point to dequeue further */
514  if (!inflight || no_job_to_deq || !n_room_left)
515  goto end_deq;
516 
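  /* DPDK 21.05 added a max_nb_to_dequeue argument; it is unused here (0)
   * since the dequeue count is taken from the get_dequeue_count callback. */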
#if RTE_VERSION >= RTE_VERSION_NUM(21, 5, 0, 0)
  n_deq = rte_cryptodev_raw_dequeue_burst (
    cet->ctx, cryptodev_get_frame_n_elts, 0, cryptodev_post_dequeue,
    (void **) &frame, 0, &n_success, &dequeue_status);
#else
  n_deq = rte_cryptodev_raw_dequeue_burst (
    cet->ctx, cryptodev_get_frame_n_elts, cryptodev_post_dequeue,
    (void **) &frame, 0, &n_success, &dequeue_status);
#endif

  if (!n_deq)
    goto end_deq;

  inflight -= n_deq;
  no_job_to_deq = n_deq < frame->n_elts;
  /* we have to cache the frame */
  if (frame_ret || n_cached_frame || no_job_to_deq)
    {
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }
  else
    {
      frame->state = n_success == frame->n_elts ?
                       VNET_CRYPTO_FRAME_STATE_SUCCESS :
                       VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
      frame_ret = frame;
    }

  /* see if we can dequeue more */
  while (inflight && n_room_left && !no_job_to_deq)
    {
#if RTE_VERSION >= RTE_VERSION_NUM(21, 5, 0, 0)
      n_deq = rte_cryptodev_raw_dequeue_burst (
        cet->ctx, cryptodev_get_frame_n_elts, 0, cryptodev_post_dequeue,
        (void **) &frame, 0, &n_success, &dequeue_status);
#else
      n_deq = rte_cryptodev_raw_dequeue_burst (
        cet->ctx, cryptodev_get_frame_n_elts, cryptodev_post_dequeue,
        (void **) &frame, 0, &n_success, &dequeue_status);
#endif
      if (!n_deq)
        break;
      inflight -= n_deq;
      no_job_to_deq = n_deq < frame->n_elts;
      frame->state = frame->n_elts - n_deq;
      frame->state |= ((n_success < n_deq) << 7);
      rte_ring_sp_enqueue (cet->cached_frame, (void *) frame);
      n_room_left--;
    }

end_deq:
  if (inflight < cet->inflight)
    {
      int res =
        rte_cryptodev_raw_dequeue_done (cet->ctx, cet->inflight - inflight);
      ASSERT (res == 0);
      cet->inflight = inflight;
    }

  if (frame_ret)
    {
      *nb_elts_processed = frame_ret->n_elts;
      *enqueue_thread_idx = frame_ret->enqueue_thread_index;
    }

  return frame_ret;
}

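/* Thin wrappers binding the operation type and AAD length so that each
 * variant can be registered as a vnet async crypto handler. */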
static_always_inline int
cryptodev_raw_enq_aead_aad_8_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT, 8);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_12_enc (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
                                     12);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_8_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT, 8);
}

static_always_inline int
cryptodev_raw_enq_aead_aad_12_dec (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_raw_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
                                     12);
}

static_always_inline int
cryptodev_raw_enq_linked_alg_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_raw_enq_linked_alg_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_DECRYPT);
}

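/* Engine registration entry point for the raw data-path API. If any probed
 * cryptodev instance lacks RTE_CRYPTODEV_FF_SYM_RAW_DP this falls back to
 * the rte_crypto_op based handlers (cryptodev_register_cop_hdl ());
 * otherwise it allocates the per-thread frame cache ring, AAD staging
 * buffer and raw data-path context. */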
clib_error_t *
cryptodev_register_raw_hdl (vlib_main_t *vm, u32 eidx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  cryptodev_inst_t *cinst;
  struct rte_cryptodev_info info;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;
  u32 support_raw_api = 1, max_ctx_size = 0;
  clib_error_t *error = 0;

  vec_foreach (cinst, cmt->cryptodev_inst)
    {
      u32 ctx_size;
      rte_cryptodev_info_get (cinst->dev_id, &info);
      if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_RAW_DP))
        {
          support_raw_api = 0;
          break;
        }

      ctx_size = rte_cryptodev_get_raw_dp_ctx_size (cinst->dev_id);
      max_ctx_size = clib_max (ctx_size, max_ctx_size);
    }

  if (!support_raw_api)
    return cryptodev_register_cop_hdl (vm, eidx);

  vec_foreach (cet, cmt->per_thread_data)
    {
      u32 thread_id = cet - cmt->per_thread_data;
      u32 numa = vlib_get_main_by_index (thread_id)->numa_node;
      u8 *name = format (0, "cache_frame_ring_%u_%u", numa, thread_id);

      cet->cached_frame =
        rte_ring_create ((char *) name, CRYPTODEV_DEQ_CACHE_SZ, numa,
                         RING_F_SC_DEQ | RING_F_SP_ENQ);

      cet->aad_buf = rte_zmalloc_socket (
        0, CRYPTODEV_NB_CRYPTO_OPS * CRYPTODEV_MAX_AAD_SIZE,
        CLIB_CACHE_LINE_BYTES, numa);
      if (cet->aad_buf == 0)
        {
          error = clib_error_return (0, "Failed to alloc aad buf");
          goto err_handling;
        }
      cet->aad_phy_addr = rte_malloc_virt2iova (cet->aad_buf);

      cet->ctx =
        rte_zmalloc_socket (0, max_ctx_size, CLIB_CACHE_LINE_BYTES, numa);
      if (!cet->ctx)
        {
          error = clib_error_return (0, "Failed to alloc raw dp ctx");
          goto err_handling;
        }

      if (cet->cached_frame == 0)
        {
          error = clib_error_return (0, "Failed to alloc frame ring %s", name);
          goto err_handling;
        }

      vec_free (name);
    }

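  /* Register enqueue/dequeue handlers for every AEAD and linked
   * cipher + HMAC combination the probed devices support; the `_' macro is
   * expanded once per algorithm by the foreach_* lists below. */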
/** INDENT-OFF **/
#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
        cryptodev_raw_enq_aead_aad_##f##_enc, cryptodev_raw_dequeue);         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
        cryptodev_raw_enq_aead_aad_##f##_dec, cryptodev_raw_dequeue);         \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
        cryptodev_raw_enq_linked_alg_enc, cryptodev_raw_dequeue);             \
      vnet_crypto_register_async_handler (                                    \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
        cryptodev_raw_enq_linked_alg_dec, cryptodev_raw_dequeue);             \
    }
  foreach_cryptodev_link_async_alg
#undef _

  cmt->is_raw_api = 1;

  return 0;

err_handling:
  vec_foreach (cet, cmt->per_thread_data)
    {
      if (cet->cached_frame)
        rte_ring_free (cet->cached_frame);
    }

  return error;
}