#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
/* ... expands into the esp_decrypt_next_t enum ... */

#define foreach_esp_decrypt_error                       \
 _(RX_PKTS, "ESP pkts received")                        \
 _(DECRYPTION_FAILED, "ESP decryption failed")          \
 _(REPLAY, "SA replayed packet")                        \
 _(NOT_IP, "Not IP packet (dropped)")                   \
 _(ENQ_FAIL, "Enqueue decrypt failed (queue full)")     \
 _(DISCARD, "Not enough crypto operations")             \
 _(BAD_LEN, "Invalid ciphertext length")                \
 _(SESSION, "Failed to get crypto session")             \
 _(NOSUP, "Cipher/Auth not supported")

#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
/* ... expands into the esp_decrypt_error_t enum ... */

#define _(sym,string) string,
/* ... expands into esp_decrypt_error_strings[] ... */

/* format_esp_decrypt_trace */
  s = format (s, "cipher %U auth %U\n", /* ... */);
/* dpdk_esp_decrypt_inline: per-frame setup and crypto op allocation */
static uword
dpdk_esp_decrypt_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame, int is_ip6)
{
  u32 n_left_from, *from, *to_next, next_index, thread_index;
  /* ... */
  struct rte_cryptodev_sym_session *session = 0;
  u32 ret, last_sa_index = ~0;
  u8 numa = rte_socket_id ();
  /* ... */
  struct rte_crypto_op **ops = cwm->ops;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  /* one crypto op per packet; on failure, drop the whole frame */
  ret = crypto_alloc_ops (numa, ops, n_left_from);
  if (ret)
    {
      if (is_ip6)
        vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index,
                                     ESP_DECRYPT_ERROR_DISCARD, n_left_from);
      else
        vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index,
                                     ESP_DECRYPT_ERROR_DISCARD, n_left_from);
      vlib_buffer_free (vm, from, n_left_from);
      return n_left_from;
    }
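crypto_alloc_ops is all-or-nothing: either every packet in the frame gets a crypto op or none does, so the node never leaves a frame half-submitted. A standalone sketch of that pattern (the demo_ pool type and sizes are illustrative):

#include <stdlib.h>
#include <stdio.h>

/* Illustrative stand-in for a fixed-size op pool */
typedef struct { int free_ops; } demo_pool_t;

/* Returns 0 on success, nonzero when the pool cannot cover the frame */
static int
demo_alloc_ops (demo_pool_t *pool, void **ops, int n)
{
  if (pool->free_ops < n)
    return -1;                  /* nothing allocated: all-or-nothing */
  for (int i = 0; i < n; i++)
    ops[i] = malloc (64);       /* placeholder for a crypto op */
  pool->free_ops -= n;
  return 0;
}

int
main (void)
{
  demo_pool_t pool = { .free_ops = 2 };
  void *ops[4];
  if (demo_alloc_ops (&pool, ops, 4))
    printf ("frame dropped: not enough crypto operations\n");
  return 0;
}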
  next_index = ESP_DECRYPT_NEXT_DROP;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, sa_index0, iv_size;
          vlib_buffer_t *b0;
          struct rte_mbuf *mb0;
          struct rte_crypto_op *op;
          /* ... */
          ASSERT (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED);

          dpdk_op_priv_t *priv = crypto_op_get_priv (op);
          /* the op, its sym op, and the private area are contiguous */
          u16 op_len =
            sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
          CLIB_PREFETCH (op, op_len, STORE);

          sa_index0 = vnet_buffer (b0)->ipsec.sad_index;
          vlib_prefetch_combined_counter (&ipsec_sa_counters,
                                          thread_index, sa_index0);

          if (sa_index0 != last_sa_index)
            {
              sa0 = pool_elt_at_index (im->sad, sa_index0);

              cipher_alg =
                vec_elt_at_index (dcm->cipher_algs, sa0->crypto_alg);
              auth_alg = vec_elt_at_index (dcm->auth_algs, sa0->integ_alg);

              is_aead = (cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD);
              if (is_aead)
                auth_alg = cipher_alg;

              /* ... no crypto resource for this SA: count
                 ESP_DECRYPT_ERROR_NOSUP on the per-AF node and drop ... */

              /* ... crypto_get_session () failure: count
                 ESP_DECRYPT_ERROR_SESSION on the per-AF node and drop ... */

              last_sa_index = sa_index0;
            }
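Caching last_sa_index works because packets arriving back-to-back usually belong to the same SA, so the pool/vector lookups and the session fetch run once per burst rather than once per packet. A standalone sketch of the memoization (demo_ names and the lookup are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef struct { int alg; } demo_session_t;

/* Pretend lookup: expensive in the real node (pool + vector lookups) */
static demo_session_t *
demo_lookup_session (uint32_t sa_index)
{
  static demo_session_t s;
  s.alg = (int) sa_index;
  printf ("lookup for SA %u\n", sa_index);
  return &s;
}

int
main (void)
{
  uint32_t sa_of_pkt[] = { 7, 7, 7, 9, 9 };
  uint32_t last_sa_index = ~0u;
  demo_session_t *session = 0;

  for (int i = 0; i < 5; i++)
    {
      if (sa_of_pkt[i] != last_sa_index)   /* refresh only on change */
        {
          session = demo_lookup_session (sa_of_pkt[i]);
          last_sa_index = sa_of_pkt[i];
        }
      /* ... use session ... */
    }
  return 0;                     /* only two lookups for five packets */
}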
          /* anti-replay: reject stale or already-seen sequence numbers */
          if (ipsec_sa_anti_replay_check
              (sa0, clib_host_to_net_u32 (esp0->seq)))
            {
              clib_warning ("failed anti-replay check");
              if (is_ip6)
                vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index,
                                             ESP_DECRYPT_ERROR_REPLAY, 1);
              else
                vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index,
                                             ESP_DECRYPT_ERROR_REPLAY, 1);
              /* ... drop ... */
            }

          if (is_ip6)
            priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT6_POST;
          else
            {
              priv->next = DPDK_CRYPTO_INPUT_NEXT_DECRYPT4_POST;
              b0->flags |= VNET_BUFFER_F_IS_IP4;
            }
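Only the check happens here; the window itself is advanced later, in the post node, once the packet has actually authenticated (ipsec_sa_anti_replay_advance). A minimal standalone sketch of the RFC 4303-style sliding window, with check and advance folded together for brevity (not the VPP implementation):

#include <stdint.h>
#include <stdio.h>

typedef struct
{
  uint32_t last_seq;   /* highest sequence number seen so far */
  uint64_t window;     /* bit i set => (last_seq - i) was seen */
} demo_replay_t;

/* Returns 0 if seq is acceptable, nonzero if replayed or too old */
static int
demo_replay_check_and_advance (demo_replay_t *r, uint32_t seq)
{
  if (seq > r->last_seq)
    {
      uint32_t shift = seq - r->last_seq;
      r->window = (shift < 64) ? (r->window << shift) | 1 : 1;
      r->last_seq = seq;
      return 0;
    }
  uint32_t diff = r->last_seq - seq;
  if (diff >= 64)
    return -1;                          /* left of window: too old */
  if (r->window & (1ULL << diff))
    return -1;                          /* already seen: replay */
  r->window |= 1ULL << diff;
  return 0;
}

int
main (void)
{
  demo_replay_t r = { 0, 0 };
  printf ("%d\n", demo_replay_check_and_advance (&r, 5));   /* 0: new     */
  printf ("%d\n", demo_replay_check_and_advance (&r, 5));   /* -1: replay */
  printf ("%d\n", demo_replay_check_and_advance (&r, 6));   /* 0: new     */
  return 0;
}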
          /* keep the mbuf's view of the data in sync with the vlib buffer */
          mb0->data_off = RTE_PKTMBUF_HEADROOM + b0->current_data;

          trunc_size = auth_alg->trunc_size;
          iv_size = cipher_alg->iv_len;

          /* ciphertext length: everything after the ESP header and IV,
             minus the trailing ICV */
          payload_len =
            b0->current_length - sizeof (esp_header_t) - iv_size - trunc_size;

          ASSERT (payload_len >= 4);

          if (payload_len & (cipher_alg->boundary - 1))
            {
              if (is_ip6)
                vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index,
                                             ESP_DECRYPT_ERROR_BAD_LEN, 1);
              else
                vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index,
                                             ESP_DECRYPT_ERROR_BAD_LEN, 1);
              /* ... drop ... */
            }
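cipher_alg->boundary is the cipher's block size, always a power of two, so payload_len & (boundary - 1) is a branch-free payload_len % boundary. A quick standalone check:

#include <stdio.h>
#include <assert.h>

int
main (void)
{
  unsigned boundary = 16;       /* e.g. the AES block size */
  assert ((boundary & (boundary - 1)) == 0);   /* must be a power of two */

  for (unsigned len = 30; len <= 33; len++)
    printf ("len=%u misaligned=%u\n", len, len & (boundary - 1));
  /* len & (boundary - 1) == len % boundary when boundary is a power of two */
  return 0;
}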
          u32 cipher_off, cipher_len;
          u32 auth_len = 0;
          u8 *aad = NULL;

          u8 *iv = (u8 *) (esp0 + 1); /* IV sits right after the ESP header */

          dpdk_gcm_cnt_blk *icb = &priv->cb;

          cipher_off = sizeof (esp_header_t) + iv_size;
          cipher_len = payload_len;

          u8 *digest = vlib_buffer_get_tail (b0) - trunc_size;
          u64 digest_paddr =
            mb0->buf_physaddr + digest - ((u8 *) mb0->buf_addr);

          if (!is_aead && cipher_alg->alg == RTE_CRYPTO_CIPHER_AES_CBC)
            clib_memcpy_fast (icb, iv, 16);     /* CBC: IV used as-is */
          else          /* CTR/GCM: build the counter block from salt + IV */
            {
              u32 *_iv = (u32 *) iv;

              crypto_set_icb (icb, sa0->salt, _iv[0], _iv[1]);
            }
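For AES-GCM/CTR in ESP (RFC 4106 / RFC 3686), the per-packet nonce is the SA's 4-byte salt concatenated with the 8-byte explicit IV carried after the ESP header, which is what crypto_set_icb packs into the counter block. A minimal sketch of that layout (the demo_ struct and field sizes are illustrative, not dpdk_gcm_cnt_blk itself):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Illustrative counter block in the spirit of dpdk_gcm_cnt_blk */
typedef struct
{
  uint32_t salt;      /* implicit: from the SA, never on the wire */
  uint32_t iv[2];     /* explicit: 8 bytes carried after the ESP header */
  uint32_t cnt;       /* block counter, starts at 1 for CTR mode */
} demo_cnt_blk_t;

static void
demo_set_icb (demo_cnt_blk_t *icb, uint32_t salt, const uint8_t *wire_iv)
{
  icb->salt = salt;
  memcpy (icb->iv, wire_iv, 8);
  icb->cnt = 1U << 24;          /* big-endian 1 on a little-endian host */
}

int
main (void)
{
  uint8_t wire_iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  demo_cnt_blk_t icb;
  demo_set_icb (&icb, 0xdeadbeef, wire_iv);
  printf ("salt=%08x iv0=%02x\n", icb.salt, ((uint8_t *) icb.iv)[0]);
  return 0;
}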
          if (is_aead)
            {
              aad = priv->aad;
              u32 *_aad = (u32 *) aad;
              clib_memcpy_fast (aad, esp0, 8);  /* SPI + low seq from wire */

              if (ipsec_sa_is_set_USE_ESN (sa0))
                {
                  /* ... */
                  _aad[1] = clib_host_to_net_u32 (sa0->seq_hi);
                }
            }
          else
            {
              auth_len =
                sizeof (esp_header_t) + iv_size + payload_len;

              if (ipsec_sa_is_set_USE_ESN (sa0))
                {
                  /* park the received ICV in the op private area and
                     splice seq_hi into the authenticated data instead */
                  clib_memcpy_fast (priv->icv, digest, trunc_size);
                  u32 *_digest = (u32 *) digest;
                  _digest[0] = clib_host_to_net_u32 (sa0->seq_hi);
                  auth_len += sizeof (sa0->seq_hi);

                  digest = priv->icv;
                  digest_paddr =
                    op->phys_addr + (uintptr_t) priv->icv - (uintptr_t) op;
                }
            }

          crypto_op_setup (is_aead, mb0, op, session, cipher_off, cipher_len,
                           0, auth_len, aad, digest, digest_paddr);
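Under ESN (RFC 4303), only the low 32 sequence bits travel on the wire, yet the ICV covers the full 64-bit number, so the receiver appends its locally tracked seq_hi to the data it authenticates. A standalone sketch of the length bookkeeping (sizes are the standard ESP ones; the concrete values are illustrative):

#include <stdint.h>
#include <stdio.h>

enum { ESP_HDR = 8 };           /* SPI + low 32 bits of seq */

int
main (void)
{
  uint32_t iv_size = 16;        /* e.g. AES-CBC IV */
  uint32_t payload_len = 64;    /* ciphertext incl. padding + trailer */
  int use_esn = 1;

  /* auth covers ESP header + IV + payload ... */
  uint32_t auth_len = ESP_HDR + iv_size + payload_len;

  /* ... plus the implicit high-order sequence bits under ESN */
  if (use_esn)
    auth_len += sizeof (uint32_t);

  printf ("auth_len=%u\n", auth_len);   /* 92 with ESN, 88 without */
  return 0;
}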
          /* ... trace, enqueue bookkeeping ... */
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  if (is_ip6)
    {
      vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index,
                                   ESP_DECRYPT_ERROR_RX_PKTS,
                                   from_frame->n_vectors);

      crypto_enqueue_ops (vm, cwm, dpdk_esp6_decrypt_node.index,
                          ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ );
    }
  else
    {
      vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index,
                                   ESP_DECRYPT_ERROR_RX_PKTS,
                                   from_frame->n_vectors);

      crypto_enqueue_ops (vm, cwm, dpdk_esp4_decrypt_node.index,
                          ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ );
    }

  /* ... free any unused ops and return ... */
  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (dpdk_esp4_decrypt_node) = {
  .name = "dpdk-esp4-decrypt",
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};

VLIB_REGISTER_NODE (dpdk_esp6_decrypt_node) = {
  .name = "dpdk-esp6-decrypt",
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
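Here the X-macro is paired with C99 designated initializers: [ESP_DECRYPT_NEXT_##s] = n pins each next-node name to its enum slot regardless of list order. A standalone sketch (demo_ names are illustrative):

#include <stdio.h>

#define foreach_demo_next        \
_(DROP, "error-drop")            \
_(IP4_INPUT, "ip4-input")

typedef enum
{
#define _(v, s) DEMO_NEXT_##v,
  foreach_demo_next
#undef _
  DEMO_N_NEXT,
} demo_next_t;

/* Designated initializers keep each slot tied to its enum value */
static const char *demo_next_nodes[DEMO_N_NEXT] = {
#define _(s, n) [DEMO_NEXT_##s] = n,
  foreach_demo_next
#undef _
};

int
main (void)
{
  printf ("%s\n", demo_next_nodes[DEMO_NEXT_IP4_INPUT]);
  return 0;
}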
#define foreach_esp_decrypt_post_error  \
 _(PKTS, "ESP post pkts")

#define _(sym,str) ESP_DECRYPT_POST_ERROR_##sym,
/* ... expands into the esp_decrypt_post_error_t enum ... */

#define _(sym,string) string,
/* ... expands into esp_decrypt_post_error_strings[] ... */

/* format_esp_decrypt_post_trace */
  s = format (s, "cipher %U auth %U\n", /* ... */);
/* dpdk_esp_decrypt_post_inline: runs after the cryptodev has decrypted */
static uword
dpdk_esp_decrypt_post_inline (vlib_main_t * vm,
                              vlib_node_runtime_t * node,
                              vlib_frame_t * from_frame, int is_ip6)
{
  u32 n_left_from, *from, *to_next = 0, next_index;
  /* ... */

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, iv_size, next0;
          /* ... */
          u8 trunc_size, is_aead;
          u16 udp_encap_adv = 0;

          next0 = ESP_DECRYPT_NEXT_DROP;
          is_aead = cipher_alg->type == RTE_CRYPTO_SYM_XFORM_AEAD;
          if (is_aead)
            auth_alg = cipher_alg;

          trunc_size = auth_alg->trunc_size;
          iv_size = cipher_alg->iv_len;

          /* decryption succeeded: slide the replay window forward */
          ipsec_sa_anti_replay_advance (sa0,
                                        clib_host_to_net_u32 (esp0->seq));

          /* if UDP encapsulated (NAT-T), also skip the UDP header */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0)
              && (b0->flags & VNET_BUFFER_F_IS_IP4))
            udp_encap_adv = sizeof (udp_header_t);

          if (b0->flags & VNET_BUFFER_F_IS_IP4)
            vlib_buffer_advance (b0,
                                 sizeof (ip4_header_t) + udp_encap_adv +
                                 sizeof (esp_header_t) + iv_size);
          else
            vlib_buffer_advance (b0,
                                 sizeof (ip6_header_t) +
                                 sizeof (esp_header_t) + iv_size);

          b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
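After a successful decrypt the post node simply advances the buffer cursor past the outer IP header (plus the UDP header when NAT-T encapsulation is in use), the ESP header, and the IV. A toy model of vlib_buffer_advance semantics (the demo_ struct is illustrative, not vlib_buffer_t):

#include <stdint.h>
#include <stdio.h>

/* Toy model of a vlib-style buffer cursor */
typedef struct
{
  uint8_t data[256];
  int16_t current_data;     /* signed offset of the byte being processed */
  uint16_t current_length;  /* bytes from current_data to end of data */
} demo_buf_t;

static void
demo_buffer_advance (demo_buf_t *b, int l)
{
  b->current_data += l;     /* a negative l "un-strips" headers */
  b->current_length -= l;
}

int
main (void)
{
  demo_buf_t b = { .current_data = 0, .current_length = 120 };
  int ip4 = 20, udp = 8, esp = 8, iv = 16;

  /* strip outer IPv4 + UDP encap + ESP header + IV, as in the post node */
  demo_buffer_advance (&b, ip4 + udp + esp + iv);
  printf ("offset=%d left=%u\n", b.current_data, b.current_length);
  return 0;
}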
          /* b0 now holds the plaintext; f0 is the esp_footer_t at its tail.
             A malformed trailer is counted as
             ESP_DECRYPT_ERROR_DECRYPTION_FAILED on the per-AF decrypt node
             and the packet is dropped. */
          /* ... */

          if (ipsec_sa_is_set_IS_TUNNEL (sa0))
            {
              if (f0->next_header == IP_PROTOCOL_IP_IN_IP)
                next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
              else if (f0->next_header == IP_PROTOCOL_IPV6)
                next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
              else
                {
                  /* unexpected inner protocol:
                     ESP_DECRYPT_ERROR_DECRYPTION_FAILED, drop */
                  /* ... */
                }
            }
          else          /* transport mode: restore the IP header */
            {
              if (b0->flags & VNET_BUFFER_F_IS_IP4)     /* ipv4 inner */
                {
                  next0 = ESP_DECRYPT_NEXT_IP4_INPUT;
                  u16 ih4_len = ip4_header_bytes (ih4);
                  vlib_buffer_advance (b0, -ih4_len);
                  oh4 = vlib_buffer_get_current (b0);
                  memmove (oh4, ih4, ih4_len);
                  oh4->protocol = f0->next_header;
                  oh4->length = clib_host_to_net_u16 (b0->current_length);
                  oh4->checksum = ip4_header_checksum (oh4);
                }
              else              /* ipv6 inner */
                {
                  next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
                  /* ... memmove the saved ipv6 header back in place ... */
                  u16 len = b0->current_length - sizeof (ip6_header_t);
                  oh6->payload_length = clib_host_to_net_u16 (len);
                }
              /* ... failures here likewise count
                 ESP_DECRYPT_ERROR_DECRYPTION_FAILED and drop ... */
            }
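In transport mode the saved IP header is moved back over the consumed ESP header, after which length, protocol, and checksum must be rewritten; ip4_header_checksum computes the standard ones-complement header checksum. A self-contained version of that algorithm, checked against the well-known RFC 1071-style example:

#include <stdint.h>
#include <stdio.h>

/* Standard IPv4 header checksum: ones-complement sum of 16-bit words,
   computed with the checksum field itself zeroed. */
static uint16_t
demo_ip4_checksum (const uint8_t *hdr, int len)
{
  uint32_t sum = 0;
  for (int i = 0; i < len; i += 2)
    sum += (uint32_t) ((hdr[i] << 8) | hdr[i + 1]);
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}

int
main (void)
{
  /* 20-byte header with the checksum field (bytes 10-11) zeroed */
  uint8_t h[20] = { 0x45, 0x00, 0x00, 0x73, 0x00, 0x00, 0x40, 0x00,
                    0x40, 0x11, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
                    0xc0, 0xa8, 0x00, 0xc7 };
  printf ("checksum=0x%04x\n", demo_ip4_checksum (h, 20));  /* 0xb861 */
  return 0;
}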
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, bi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  if (is_ip6)
    vlib_node_increment_counter (vm, dpdk_esp6_decrypt_post_node.index,
                                 ESP_DECRYPT_POST_ERROR_PKTS,
                                 from_frame->n_vectors);
  else
    vlib_node_increment_counter (vm, dpdk_esp4_decrypt_post_node.index,
                                 ESP_DECRYPT_POST_ERROR_PKTS,
                                 from_frame->n_vectors);

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (dpdk_esp4_decrypt_post_node) = {
  .name = "dpdk-esp4-decrypt-post",
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};

VLIB_REGISTER_NODE (dpdk_esp6_decrypt_post_node) = {
  .name = "dpdk-esp6-decrypt-post",
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
Symbols referenced in this file:

u32 flags - buffer flags (VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store the free-list index; VLIB_BUFFER_IS_TRACED: trace this buffer)
static_always_inline void crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0, struct rte_crypto_op *op, void *session, u32 cipher_off, u32 cipher_len, u32 auth_off, u32 auth_len, u8 *aad, u8 *digest, u64 digest_paddr)
static vlib_cli_command_t trace - (constructor) VLIB_CLI_COMMAND (trace)
static u8 *vlib_buffer_get_tail (vlib_buffer_t *b) - get a pointer to the end of the buffer's data
static void vlib_increment_combined_counter (vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes) - increment a combined counter
static void vlib_buffer_free (vlib_main_t *vm, u32 *buffers, u32 n_buffers) - free buffers; frees the entire buffer chain for each buffer
static char *esp_decrypt_post_error_strings[]
static u8 *format_esp_decrypt_post_trace (u8 *s, va_list *args)
i16 current_data - signed offset into data[]/pre_data[] of the byte currently being processed
#define clib_memcpy_fast(a, b, c)
static_always_inline i32 crypto_alloc_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
ipsec_integ_alg_t integ_alg
u16 current_length - number of bytes between current data and the end of this buffer
static_always_inline void crypto_set_icb (dpdk_gcm_cnt_blk *icb, u32 salt, u32 seq, u32 seq_hi)
ipsec_crypto_alg_t crypto_alg
#define VLIB_NODE_FN(node)
static char *esp_decrypt_error_strings[]
static_always_inline clib_error_t *crypto_get_session (struct rte_cryptodev_sym_session **session, u32 sa_idx, crypto_resource_t *res, crypto_worker_main_t *cwm, u8 is_outbound)
static_always_inline void crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
vlib_node_registration_t dpdk_esp4_decrypt_post_node - (constructor) VLIB_REGISTER_NODE (dpdk_esp4_decrypt_post_node)
#define foreach_esp_decrypt_next
dpdk_crypto_main_t dpdk_crypto_main
#define foreach_esp_decrypt_post_error
static_always_inline void crypto_enqueue_ops (vlib_main_t *vm, crypto_worker_main_t *cwm, u32 node_index, u32 error, u8 numa, u8 encrypt)
#define vec_elt_at_index(v, i) - get the vector value at index i, checking that i is in bounds
static const u8 pad_data[]
vl_api_fib_path_type_t type
#define pool_elt_at_index(p, i) - returns a pointer to the element at the given index
crypto_alg_t *cipher_algs
static void ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 seq)
#define rte_mbuf_from_vlib_buffer(x)
static void *vlib_buffer_get_current (vlib_buffer_t *b) - get a pointer to the current data to process
vlib_node_registration_t dpdk_esp4_decrypt_node - (constructor) VLIB_REGISTER_NODE (dpdk_esp4_decrypt_node)
static void vlib_prefetch_combined_counter (const vlib_combined_counter_main_t *cm, u32 thread_index, u32 index) - prefetch a per-thread combined counter for the given object index
vlib_node_registration_t dpdk_esp6_decrypt_post_node - (constructor) VLIB_REGISTER_NODE (dpdk_esp6_decrypt_post_node)
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0) - finish enqueueing one buffer forward in the graph
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left) - get a pointer to the next frame vector data by (vlib_node_runtime_t, next_index)
static void vlib_node_increment_counter (vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
#define VLIB_REGISTER_NODE(x,...)
static_always_inline uword vlib_get_thread_index (void)
#define CLIB_PREFETCH(addr, size, type)
static uword dpdk_esp_decrypt_post_inline (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, int is_ip6)
#define clib_warning(format, args...)
static uword dpdk_esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, int is_ip6)
void vlib_put_next_frame (vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left) - release a pointer to the next frame vector data
vlib_node_runtime_t *node
u8 *format_esp_header (u8 *s, va_list *args)
vlib_combined_counter_main_t ipsec_sa_counters - SA packet and byte counters
u16 cached_next_index - next frame index that vector arguments were last enqueued to, the last time this node ran
crypto_worker_main_t *workers_main
static int ipsec_sa_anti_replay_check (ipsec_sa_t *sa, u32 seq)
static void vlib_buffer_advance (vlib_buffer_t *b, word l) - advance the current data pointer by the supplied (signed!) amount
crypto_resource_t *resource
struct _vlib_node_registration vlib_node_registration_t
vlib_node_registration_t dpdk_esp6_decrypt_node - (constructor) VLIB_REGISTER_NODE (dpdk_esp6_decrypt_node)
vlib_buffer_t - VLIB buffer representation
static void *vlib_frame_vector_args (vlib_frame_t *f) - get a pointer to the frame vector data
struct rte_crypto_op **ops
static u8 *format_esp_decrypt_trace (u8 *s, va_list *args)
enum rte_crypto_sym_xform_type type
static_always_inline u16 get_resource (crypto_worker_main_t *cwm, ipsec_sa_t *sa)
void *vlib_add_trace (vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
static int ip4_header_bytes (const ip4_header_t *i)
#define CLIB_CACHE_LINE_BYTES
#define foreach_esp_decrypt_error
struct rte_crypto_op *ops[VLIB_FRAME_SIZE]
static vlib_buffer_t *vlib_get_buffer (vlib_main_t *vm, u32 buffer_index) - translate a buffer index into a buffer pointer
static u16 ip4_header_checksum (ip4_header_t *i)
static_always_inline dpdk_op_priv_t *crypto_op_get_priv (struct rte_crypto_op *op)