#define foreach_esp_encrypt_next                   \
_(DROP, "error-drop")                              \
_(IP4_LOOKUP, "ip4-lookup")                        \
_(IP6_LOOKUP, "ip6-lookup")                        \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

#define foreach_esp_encrypt_error                               \
 _(RX_PKTS, "ESP pkts received")                                \
 _(SEQ_CYCLED, "sequence number cycled (packet dropped)")       \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(CHAINED_BUFFER, "chained buffers (packet dropped)")          \
 _(NO_TRAILER_SPACE, "no trailer space (packet dropped)")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};

typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s, "esp: sa-index %d spi %u seq %u crypto %U integrity %U%s",
	      t->sa_index, t->spi, t->seq,
	      format_ipsec_crypto_alg, t->crypto_alg,
	      format_ipsec_integ_alg, t->integ_alg,
	      t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}
static const u8 pad_data[] = {
  0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
  0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x00, 0x00,
};

/* pad the payload up to the cipher block size, then append the ESP
   footer and leave room for the ICV */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz)
{
  u16 min_length = b->current_length + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, block_size);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (b) +
				      new_length - sizeof (esp_footer_t));

  if (pad_bytes)
    clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);

  f->pad_length = pad_bytes;
  b->current_length = new_length + icv_sz;
  return &f->next_header;
}
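/* Illustrative arithmetic (not part of this file): the trailer must align
 * payload + pad + 2-byte footer to the cipher block size. round_pow2 ()
 * assumes a power-of-two block size, so for a 37-byte payload and a
 * 16-byte block:
 *
 *   min_length = 37 + 2 = 39
 *   new_length = (39 + 15) & ~15 = 48   =>  pad_bytes = 9
 *
 * and the nine bytes taken from pad_data are 0x01..0x09, the monotonic
 * padding required by RFC 4303 section 2.4. */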
static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol,
			    prot, ip4_header_t, protocol);
      ip4->protocol = prot;

      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->length = len;
  ip4->checksum = ip_csum_fold (sum);
}

static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}
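/* Standalone sketch (hypothetical helper, not in this file) of the
 * incremental-update identity behind ip_csum_update () / ip_csum_fold ():
 * per RFC 1624, HC' = ~(~HC + ~m + m'), so one header field can be swapped
 * without summing the whole header again. */
static inline u16
csum_update16_sketch (u16 csum, u16 old_val, u16 new_val)
{
  u32 sum = (u16) ~csum;
  sum += (u16) ~old_val;
  sum += new_val;
  sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries ... */
  sum = (sum & 0xffff) + (sum >> 16);	/* ... twice, to be safe */
  return (u16) ~sum;
}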
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  /* scalar fallback: nonzero when nexthdr matches one of the three
     extension-header types that must stay in front of the ESP header */
  return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
	  !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) ||
	  !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
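/* Note on the CLIB_HAVE_VEC128 branch: u8x16_splat () broadcasts nexthdr
 * into all 16 lanes, and the lane-wise == yields 0xff wherever it matches
 * ext_hdr_types. The remaining lanes of ext_hdr_types are implicitly zero,
 * which is also IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS (0), so a spurious
 * match in those lanes can only occur when hop-by-hop is the (wanted)
 * answer anyway. */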
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6)
{
  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* done if the packet has no extension header */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    return len;

  /* walk the chain of extension headers that must stay outside ESP */
  p = (void *) (ip6 + 1);
  len += ip6_ext_header_len (p);

  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      len += ip6_ext_header_len (p);
      p = ip6_ext_next_header (p);
    }

  return len;
}
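/* Per RFC 8200 the Hdr Ext Len field counts 8-octet units beyond the
 * first 8 octets, so each hop taken above adds (hdr_ext_len + 1) * 8
 * bytes, which is what the ip6_ext_header_len () macro evaluates to. */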
static_always_inline int
esp_trailer_icv_overflow (vlib_node_runtime_t * node, vlib_buffer_t * b,
			  u16 * next, u16 buffer_data_size)
{
  if (b->current_data + b->current_length <= buffer_data_size)
    return 0;

  /* the padded trailer and ICV would run past the end of the buffer */
  b->error = node->errors[ESP_ENCRYPT_ERROR_NO_TRAILER_SPACE];
  next[0] = ESP_ENCRYPT_NEXT_DROP;
  return 1;
}
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = ESP_ENCRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}
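/* Generic sketch of the failure-scan pattern used above (hypothetical
 * op_sketch_t type, not the VPP API): the batch call returns how many ops
 * completed, so the scan only has to run until the remaining
 * n_ops - n_done failures have been attributed to their buffers. */
typedef struct { int ok; u32 buffer_index; } op_sketch_t;

static inline void
scan_failures_sketch (op_sketch_t * ops, u32 n_ops, u32 n_done,
		      u16 * nexts, u16 drop_next)
{
  u32 n_fail = n_ops - n_done;
  while (n_fail)
    {
      if (!ops->ok)
	{
	  nexts[ops->buffer_index] = drop_next;
	  n_fail--;
	}
      ops++;
    }
}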
always_inline uword
esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame, int is_ip6, int is_tun)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 thread_index = vm->thread_index;
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 block_sz = 0, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;

  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);

  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len;
      u32 hdr_len = 0;

      /* ... prefetch the next buffer, resolve sa_index0 for this packet ... */
      if (sa_index0 != current_sa_index)
	{
	  /* new SA: flush the counters accumulated for the previous SA
	     and cache the new SA's parameters */
	  sa0 = pool_elt_at_index (im->sad, sa_index0);
	  current_sa_index = sa_index0;
	  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
					   sa_index0, current_sa_packets,
					   current_sa_bytes);
	  current_sa_packets = current_sa_bytes = 0;
	  spi = clib_net_to_host_u32 (sa0->spi);
	  block_sz = sa0->crypto_block_size;
	  icv_sz = sa0->integ_icv_size;
	  iv_sz = sa0->crypto_iv_size;
	}

      /* this node works on a single contiguous buffer only */
      if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
	{
	  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_CHAINED_BUFFER];
	  next[0] = ESP_ENCRYPT_NEXT_DROP;
	  goto trace;
	}

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
	{
	  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
	  next[0] = ESP_ENCRYPT_NEXT_DROP;
	  goto trace;
	}
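      /* esp_seq_advance () lives in esp.h; a simplified sketch of its
       * contract (assumed semantics, field names as used here): with ESN
       * the 32-bit seq may wrap into seq_hi, without ESN a wrap on the SA
       * is fatal and the packet is dropped as SEQ_CYCLED.
       *
       *   static inline int
       *   esp_seq_advance_sketch (ipsec_sa_t * sa)
       *   {
       *     if (ipsec_sa_is_set_USE_ESN (sa))
       *       {
       *         if (sa->seq == ~0)
       *           sa->seq_hi++;      // 64-bit ESN: carry into high word
       *         sa->seq++;
       *         return 0;
       *       }
       *     if (sa->seq == ~0)
       *       return 1;              // 32-bit space exhausted
       *     sa->seq++;
       *     return 0;
       *   }
       */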
      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
	{
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz);
	  payload_len = b[0]->current_length;

	  if (esp_trailer_icv_overflow (node, b[0], next, buffer_data_size))
	    goto trace;

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
				payload_len + hdr_len);
	    }

	  /* IP header */
	  if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
	    {
	      ip6_header_t *ip6;
	      u16 len = sizeof (ip6_header_t);
	      hdr_len += len;
	      ip6 = (ip6_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip6, &sa0->ip6_hdr, len);
	      *next_hdr_ptr = (is_ip6 ?
			       IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
	      len = payload_len + hdr_len - len;
	      ip6->payload_length = clib_host_to_net_u16 (len);
	    }
	  else
	    {
	      ip4_header_t *ip4;
	      u16 len = sizeof (ip4_header_t);
	      hdr_len += len;
	      ip4 = (ip4_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip4, &sa0->ip4_hdr, len);
	      *next_hdr_ptr = (is_ip6 ?
			       IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
	      len = payload_len + hdr_len;
	      esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
	    }

	  if (!is_tun)
	    {
	      dpo = &sa0->dpo[IPSEC_PROTOCOL_ESP];
	      next[0] = dpo->dpoi_next_node;
	      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
	    }
	}
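      /* Resulting tunnel-mode packet, built back to front from the payload
       * (RFC 4303 layout):
       *
       *   [outer IP][UDP (opt)][ESP][IV][payload][pad][pad_len|next_hdr][ICV]
       */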
      else			/* transport mode */
	{
	  u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
	  udp_header_t *udp = 0;
	  u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

	  ip_len = is_ip6 ?
	    esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr) :
	    ip4_header_bytes ((ip4_header_t *) old_ip_hdr);

	  vlib_buffer_advance (b[0], ip_len);
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz);
	  payload_len = b[0]->current_length;

	  if (esp_trailer_icv_overflow (node, b[0], next, buffer_data_size))
	    goto trace;

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      udp = (udp_header_t *) (payload - hdr_len);
	    }

	  /* IP header */
	  hdr_len += ip_len;
	  ip_hdr = payload - hdr_len;

	  /* L2 header */
	  l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
	  hdr_len += l2_len;
	  l2_hdr = payload - hdr_len;

	  /* copy l2 and ip header in front of the ESP header */
	  clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
	  clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

	  if (is_ip6)
	    {
	      ip6_header_t *ip6 = (ip6_header_t *) ip_hdr;
	      *next_hdr_ptr = ip6->protocol;
	      ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
	      ip6->payload_length =
		clib_host_to_net_u16 (payload_len + hdr_len - l2_len -
				      ip_len);
	    }
	  else
	    {
	      ip4_header_t *ip4 = (ip4_header_t *) ip_hdr;
	      u16 len = payload_len + hdr_len - l2_len;
	      *next_hdr_ptr = ip4->protocol;
	      esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, udp != 0);
	      if (udp)
		esp_fill_udp_hdr (sa0, udp, len - ip_len);
	    }

	  next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	}
      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);

      if (sa0->crypto_enc_op_id)
	{
	  vnet_crypto_op_t *op;
	  vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
	  vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
	  op->iv = payload - iv_sz;
	  op->src = op->dst = payload;
	  op->len = payload_len - icv_sz;
	  op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
	  op->user_data = b - bufs;
	  /* ... key material from sa0 ... */

	  if (ipsec_sa_is_set_IS_AEAD (sa0))
	    {
	      /* ... point op->aad at scratch space in front of the headers
		 and build it with esp_aad_fill () ... */
	      op->tag = payload + op->len;
	      op->tag_len = 16;
	    }
	}

      if (sa0->integ_op_id)
	{
	  vnet_crypto_op_t *op;
	  vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
	  vnet_crypto_op_init (op, sa0->integ_op_id);
	  /* the ICV covers the ESP header, IV and payload */
	  op->src = payload - iv_sz - sizeof (esp_header_t);
	  op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
	  op->digest = payload + payload_len - icv_sz;
	  op->user_data = b - bufs;
	  /* ... key material and digest length from sa0 ... */

	  if (ipsec_sa_is_set_USE_ESN (sa0))
	    {
	      u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
	      clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
	      op->len += sizeof (seq_hi);
	    }
	}

      vlib_buffer_advance (b[0], 0LL - hdr_len);
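      /* Note on the ESN block above: RFC 4303 includes the high-order
       * sequence bits in the ICV computation but never transmits them, so
       * seq_hi is staged in the bytes where the digest will land, hashed
       * as part of op->len, then overwritten by the computed ICV. */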
      current_sa_packets += 1;
      current_sa_bytes += payload_len;

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
						    sizeof (*tr));
	  tr->sa_index = sa_index0;
	  tr->spi = sa0->spi;
	  tr->seq = sa0->seq - 1;
	  tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	}

      /* next */
      n_left -= 1;
      next += 1;
      b += 1;
    }

  /* flush the counters accumulated for the last SA in the frame */
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				   current_sa_index, current_sa_packets,
				   current_sa_bytes);

  esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts);
  esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts);

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
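/* Design note: the per-packet work above only queues vnet_crypto_op_t
 * entries in the per-thread ptd vectors; the actual cipher/auth work runs
 * once per frame in the two esp_process_ops () calls, letting the crypto
 * engine batch the whole vector. Likewise, SA counters are accumulated in
 * locals and flushed only on SA change and at end of frame. */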
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ ,
			     0 /* is_tun */ );
}

/* the esp6-encrypt and *-tun node functions follow the same pattern,
   varying only the is_ip6 / is_tun flags passed to esp_encrypt_inline () */

VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};

VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};

/* the tunnel nodes run as output features; their only next node is drop */
VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop",
  },
};

VNET_FEATURE_INIT (esp4_encrypt_tun_feat_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "esp4-encrypt-tun",
  /* ... */
};

VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
  .n_next_nodes = 1,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop",
  },
};

VNET_FEATURE_INIT (esp6_encrypt_tun_feat_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "esp6-encrypt-tun",
  /* ... */
};