FD.io VPP v21.10.1-2-g0a485f517, Vector Packet Processing
Source listing (fragments): the ESP encrypt node, esp_encrypt.c
#define foreach_esp_encrypt_next                                              \
  _ (DROP4, "ip4-drop")                                                       \
  _ (DROP6, "ip6-drop")                                                       \
  _ (DROP_MPLS, "mpls-drop")                                                  \
  _ (HANDOFF4, "handoff4")                                                    \
  _ (HANDOFF6, "handoff6")                                                    \
  _ (HANDOFF_MPLS, "handoff-mpls")                                            \
  _ (INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error                                             \
  _ (RX_PKTS, "ESP pkts received")                                            \
  _ (POST_RX_PKTS, "ESP-post pkts received")                                  \
  _ (HANDOFF, "Hand-off")                                                     \
  _ (SEQ_CYCLED, "sequence number cycled (packet dropped)")                   \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
  _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")                 \
  _ (NO_BUFFERS, "no buffers (packet dropped)")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};
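The next/error lists above use the classic X-macro pattern: a single list macro is expanded under different definitions of _() to generate an enum and a parallel string table that cannot drift apart. A minimal standalone sketch with illustrative names (not VPP code):

#include <stdio.h>

#define foreach_demo_error                       \
  _ (RX_PKTS, "pkts received")                   \
  _ (NO_BUFFERS, "no buffers (packet dropped)")

typedef enum
{
#define _(sym, str) DEMO_ERROR_##sym,
  foreach_demo_error
#undef _
    DEMO_N_ERROR,
} demo_error_t;

static char *demo_error_strings[] = {
#define _(sym, str) str,
  foreach_demo_error
#undef _
};

int
main (void)
{
  for (int i = 0; i < DEMO_N_ERROR; i++)
    printf ("%d: %s\n", i, demo_error_strings[i]);
  return 0;
}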
95 "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
99 t->
udp_encap ?
" udp-encap-enabled" :
"");
static_always_inline u8 *
esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
                        u8 icv_sz, vlib_node_runtime_t *node,
                        u16 buffer_data_size, uword total_len)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
  };
  /* ... */
  u8 pad_bytes = new_length - min_length;
  /* ... */
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
                                      last[0]->current_length + pad_bytes);
  /* ... */
  if (last[0]->current_data + last[0]->current_length + tail_sz >
      buffer_data_size)
    {
      /* trailer does not fit: allocate and link an extra buffer */
      /* ... */
      last[0]->next_buffer = tmp_bi;
      last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
      /* ... */
      tmp->current_length += tail_sz;
      /* ... */
    }
  else
    last[0]->current_length += tail_sz;

  f->pad_length = pad_bytes;
  /* ... */
  return &f->next_header;
}
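esp_add_footer_and_icv() sizes the ESP trailer: the payload plus the 2-byte footer is rounded up to the cipher's block alignment, and the gap is filled with the monotonic pad bytes 0x01, 0x02, ... that RFC 4303 prescribes. A standalone sketch of the arithmetic, with a local stand-in for VPP's round_pow2():

#include <stdio.h>

/* local stand-in for VPP's round_pow2() */
static unsigned
round_pow2 (unsigned x, unsigned pow2)
{
  return (x + pow2 - 1) & ~(pow2 - 1);
}

int
main (void)
{
  unsigned esp_align = 16;   /* e.g. the AES-CBC block size */
  unsigned payload_len = 37; /* bytes of inner packet to protect */
  unsigned footer_sz = 2;    /* esp_footer_t: pad_length + next_header */

  unsigned min_length = payload_len + footer_sz;
  unsigned new_length = round_pow2 (min_length, esp_align);
  unsigned pad_bytes = new_length - min_length;

  /* the pad itself would be the bytes 0x01, 0x02, ..., pad_bytes */
  printf ("pad_bytes=%u padded_len=%u\n", pad_bytes, new_length);
  return 0;
}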
static_always_inline void
esp_update_ip4_hdr (ip4_header_t *ip4, u16 len, int is_transport, int is_udp)
{
  /* ... */
  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;
  /* ... */
  u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
  /* ... */
  ip4->protocol = prot;
  /* ... */
}

static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t *sa, udp_header_t *udp, u16 len)
{
  /* ... */
  udp->length = clib_net_to_host_u16 (len);
}
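esp_update_ip4_hdr() patches the outer IPv4 header with ip_csum_update()/ip_csum_fold() rather than recomputing the checksum from scratch, using the incremental-update identity of RFC 1624: HC' = ~(~HC + ~m + m'). A standalone sketch of that identity (the field values are made up):

#include <stdio.h>
#include <stdint.h>

static uint16_t
csum_update (uint16_t csum, uint16_t old, uint16_t new)
{
  /* RFC 1624, eqn. 3: add the one's complement of the old field value
     and the new value into the complemented checksum, then fold */
  uint32_t sum = (uint16_t) ~csum;
  sum += (uint16_t) ~old;
  sum += new;
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}

int
main (void)
{
  uint16_t csum = 0xb1e6; /* arbitrary starting header checksum */
  /* the total-length field grows when the ESP header/trailer are added */
  uint16_t fixed = csum_update (csum, 0x0054, 0x0080);
  printf ("updated checksum: 0x%04x\n", fixed);
  return 0;
}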
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) |
          !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) |
          !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
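ext_hdr_is_pre_esp() answers one question, branch-free: is this next-header value one of the three IPv6 extension headers that must stay in front of ESP (hop-by-hop 0, routing 43, fragment 44)? With CLIB_HAVE_VEC128 it is a single 16-lane SIMD compare; the portable fallback mirrors the scalar form, as in this standalone sketch:

#include <stdio.h>
#include <stdint.h>

enum
{
  IP6_HOP_BY_HOP = 0,
  IP6_ROUTING = 43,
  IP6_FRAGMENT = 44,
};

static uint8_t
is_pre_esp (uint8_t nexthdr)
{
  /* each !(x ^ c) is 1 exactly when x == c; OR-ing the three terms
     gives a branch-free membership test */
  return (!(nexthdr ^ IP6_HOP_BY_HOP) | !(nexthdr ^ IP6_ROUTING) |
          !(nexthdr ^ IP6_FRAGMENT));
}

int
main (void)
{
  printf ("routing(43): %u  tcp(6): %u\n", is_pre_esp (43), is_pre_esp (6));
  return 0;
}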
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t *ip6, ip6_ext_header_t **ext_hdr)
{
  /* ... */
  p = (void *) (ip6 + 1);
  /* ... */
}
  /* esp_process_ops(): walk the op array until every reported failure is
     accounted for */
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
          nexts[bi] = drop_next;
          /* ... */
        }
  /* ... */
  /* esp_process_chained_ops(): the same failure scan for chained ops */
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
          nexts[bi] = drop_next;
          /* ... */
        }
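Both failure loops rely on the contract that vnet_crypto_process_ops() reports how many ops failed, so the scan can stop as soon as every failure has been found, and on op->user_data carrying the buffer's slot in the frame. A standalone sketch with stand-in types:

#include <stdio.h>

enum { OP_COMPLETED = 0, OP_FAILED = 1 };

typedef struct
{
  int status;
  unsigned user_data; /* slot of the buffer this op belongs to */
} op_t;

static void
mark_failures (op_t *ops, int n_ops, int n_fail, unsigned short *nexts,
               unsigned short drop_next)
{
  /* stop as soon as all reported failures have been found */
  for (op_t *op = ops; n_fail && op < ops + n_ops; op++)
    if (op->status != OP_COMPLETED)
      {
        nexts[op->user_data] = drop_next;
        n_fail--;
      }
}

int
main (void)
{
  op_t ops[3] = { { OP_COMPLETED, 0 }, { OP_FAILED, 1 }, { OP_COMPLETED, 2 } };
  unsigned short nexts[3] = { 7, 7, 7 };
  mark_failures (ops, 3, 1, nexts, 0);
  printf ("nexts: %u %u %u\n", nexts[0], nexts[1], nexts[2]);
  return 0;
}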
static_always_inline u32
esp_encrypt_chain_crypto (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                          ipsec_sa_t *sa0, vlib_buffer_t *b,
                          vlib_buffer_t *lb, u8 icv_sz, u8 *start,
                          u32 start_len, u16 *n_ch)
{
  /* ... */
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  /* ... walk the buffer chain, one chunk per buffer ... */
      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;
  /* ... */
}

static_always_inline u32
esp_encrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                         ipsec_sa_t *sa0, vlib_buffer_t *b,
                         vlib_buffer_t *lb, u8 icv_sz, u8 *start,
                         u32 start_len, u8 *digest, u16 *n_ch)
{
  /* ... */
  total_len = ch->len = start_len;
  /* ... on the last buffer, append seq_hi so the ICV covers it ... */
      if (ipsec_sa_is_set_USE_ESN (sa0))
        {
          u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
          /* ... */
          ch->len += sizeof (seq_hi);
          total_len += sizeof (seq_hi);
        }
  /* ... */
      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;
  /* ... */
}
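The two chain builders turn a linked list of vlib buffers into a flat (src, len) chunk array for the crypto engine, trimming the ICV bytes from the last buffer so the tag area is not fed into the cipher or hash. A standalone sketch of the walk, with stand-in types:

#include <stdio.h>
#include <stddef.h>

typedef struct buf
{
  unsigned char *data;
  unsigned len;
  struct buf *next;
} buf_t;

typedef struct
{
  unsigned char *src;
  unsigned len;
} chunk_t;

static unsigned
build_chunks (buf_t *b, buf_t *last, unsigned icv_sz, chunk_t *ch,
              unsigned *n_ch)
{
  unsigned total = 0, n = 0;
  for (; b; b = b->next)
    {
      /* the ICV lives at the tail of the last buffer: keep it out */
      unsigned len = b->len - (b == last ? icv_sz : 0);
      ch[n].src = b->data;
      ch[n].len = len;
      total += len;
      n++;
    }
  *n_ch = n;
  return total;
}

int
main (void)
{
  unsigned char d0[100], d1[60];
  buf_t b1 = { d1, 60, NULL };
  buf_t b0 = { d0, 100, &b1 };
  chunk_t ch[8];
  unsigned n_ch, total = build_chunks (&b0, &b1, 16, ch, &n_ch);
  printf ("%u chunks, %u bytes\n", n_ch, total);
  return 0;
}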
static void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                     vnet_crypto_op_t **crypto_ops,
                     vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0,
                     u32 seq_hi, u8 *payload, u16 payload_len, u8 iv_sz,
                     u8 icv_sz, u32 bi, vlib_buffer_t **b, vlib_buffer_t *lb,
                     u32 hdr_len, esp_header_t *esp)
{
  /* ... */
  op->src = op->dst = payload;
  /* ... */
  op->len = payload_len - icv_sz;
  /* ... */
  if (ipsec_sa_is_set_IS_CTR (sa0))
    {
      /* construct the nonce in scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce =
        (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
                             sizeof (*nonce));
      u64 *pkt_iv = (u64 *) (payload - sizeof (u64));

      if (ipsec_sa_is_set_IS_AEAD (sa0))
        {
          /* ... */
          op->tag = payload + op->len;
          /* ... */
        }
      else
        nonce->ctr = clib_host_to_net_u32 (1);

      nonce->salt = sa0->salt;
      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
      op->iv = (u8 *) nonce;
    }
  else
    {
      op->iv = payload - iv_sz;
      /* ... */
    }
  /* ... integrity op ... */
  op->digest = payload + payload_len - icv_sz;
  /* ... chained case: hash over the whole buffer chain ... */
  esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
                           payload - iv_sz - sizeof (esp_header_t),
                           payload_len + iv_sz + sizeof (esp_header_t),
                           op->digest, &op->n_chunks);
  /* ... */
  else if (ipsec_sa_is_set_USE_ESN (sa0))
    {
      u32 tmp = clib_net_to_host_u32 (seq_hi);
      clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
      op->len += sizeof (seq_hi);
    }
}
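For CTR and GCM SAs the per-op IV is not random: it is the SA's 4-byte salt, a 64-bit per-packet counter that must never repeat under one key, and (for plain CTR) a big-endian block counter of 1, the RFC 3686 layout. A standalone sketch of the nonce assembly; the struct mirrors, but is not, VPP's esp_ctr_nonce_t:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

typedef struct __attribute__ ((packed))
{
  uint32_t salt; /* fixed per SA, from the keying material */
  uint64_t iv;   /* per packet; reuse under one key breaks CTR/GCM */
  uint32_t ctr;  /* initial block counter, big-endian 1 for plain CTR */
} ctr_nonce_t;

int
main (void)
{
  static uint64_t ctr_iv_counter; /* monotonic per-SA counter */
  ctr_nonce_t nonce;

  nonce.salt = 0xdeadbeef; /* made-up salt */
  nonce.iv = ctr_iv_counter++;
  nonce.ctr = htonl (1);

  printf ("nonce: %zu bytes (salt 4 + iv 8 + ctr 4)\n", sizeof (nonce));
  return 0;
}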
static_always_inline void
esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                         vnet_crypto_async_frame_t *async_frame,
                         ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
                         u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
                         u32 bi, u16 next, u32 hdr_len, u16 async_next,
                         vlib_buffer_t *lb)
{
  u8 *tag, *iv, *aad = 0;
  u8 flag = 0;
  u32 key_index;
  i16 crypto_start_offset, integ_start_offset = 0;
  u16 crypto_total_len, integ_total_len;
  /* ... */
  crypto_start_offset = payload - b->data;
  crypto_total_len = integ_total_len = payload_len - icv_sz;
  tag = payload + crypto_total_len;
  /* ... */
  if (ipsec_sa_is_set_IS_CTR (sa))
    {
      /* construct the nonce in scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce =
        (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
                             sizeof (*nonce));
      u64 *pkt_iv = (u64 *) (payload - sizeof (u64));

      if (ipsec_sa_is_set_IS_AEAD (sa))
        {
          /* ... */
        }
      else
        nonce->ctr = clib_host_to_net_u32 (1);

      nonce->salt = sa->salt;
      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
      iv = (u8 *) nonce;
    }
  else
    {
      iv = payload - iv_sz;
      /* ... */
    }
  /* ... chained buffers: build the crypto chunk list ... */
  crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
                                               payload, payload_len, 0);
  /* ... */
  integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
  /* ... */
  integ_total_len = esp_encrypt_chain_integ (vm, ptd, sa, b, lb, icv_sz,
                                             /* ... */);
  /* ... */
  else if (ipsec_sa_is_set_USE_ESN (sa))
    {
      u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
      /* ... */
      integ_total_len += sizeof (seq_hi);
    }
  /* ... */
  vnet_crypto_async_add_to_frame (vm, async_frame, key_index,
                                  crypto_total_len,
                                  integ_total_len - crypto_total_len,
                                  crypto_start_offset, integ_start_offset,
                                  bi, async_next, iv, tag, aad, flag);
}
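esp_aad_fill(), used on the AEAD paths above, builds the additional authenticated data of RFC 4106: SPI plus 32-bit sequence number (8 bytes), or SPI, seq-hi, seq-lo (12 bytes) when extended sequence numbers are enabled. A standalone sketch:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static unsigned
aad_fill (uint8_t *aad, uint32_t spi_net, uint32_t seq, uint32_t seq_hi,
          int use_esn)
{
  uint32_t w[3];
  unsigned n = 0;

  w[n++] = spi_net; /* the SPI is already big-endian in the ESP header */
  if (use_esn)
    w[n++] = htonl (seq_hi); /* high half of the 64-bit ESN */
  w[n++] = htonl (seq);

  memcpy (aad, w, n * sizeof (uint32_t));
  return n * sizeof (uint32_t);
}

int
main (void)
{
  uint8_t aad[12];
  printf ("aad_len=%u without ESN, %u with ESN\n",
          aad_fill (aad, htonl (0x100), 5, 0, 0),
          aad_fill (aad, htonl (0x100), 5, 0, 1));
  return 0;
}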
static uword
esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *frame, vnet_link_t lt, int is_tun,
                    u16 async_next_node)
{
  /* ... */
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
  /* ... */
  int is_async = im->async_mode;
  /* ... */
  u16 drop_next =
    (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
                           (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
                                                  ESP_ENCRYPT_NEXT_DROP_MPLS));
  u16 handoff_next =
    (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_HANDOFF6 :
                           (lt == VNET_LINK_IP4 ?
                              ESP_ENCRYPT_NEXT_HANDOFF4 :
                              ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
  /* ... */
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  /* ... */
  clib_memset (async_frames, 0, sizeof (async_frames));

  while (n_left > 0)
    {
      u8 *payload, *next_hdr_ptr;
      u16 payload_len, payload_len_total, n_bufs;
      /* ... */
      err = ESP_ENCRYPT_ERROR_RX_PKTS;
      /* ... */
      if (sa_index0 != current_sa_index)
        {
          /* flush the previous SA's packet/byte counters */
          if (current_sa_packets)
            /* ... */
          current_sa_packets = current_sa_bytes = 0;
          /* ... */
          current_sa_index = sa_index0;
          spi = clib_net_to_host_u32 (sa0->spi);
          /* ... */
          is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
        }

      /* ... packets owned by another thread are handed off ... */
      err = ESP_ENCRYPT_ERROR_HANDOFF;
      /* ... */
      err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
      /* ... walk to the last buffer of the chain ... */
      while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
        lb = vlib_get_buffer (vm, lb->next_buffer);
      /* ... */
      err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
      /* ... */

      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
        {
          /* ... */
          next_hdr_ptr = esp_add_footer_and_icv (
            vm, &lb, esp_align, icv_sz, node, buffer_data_size,
            vlib_buffer_length_in_chain (vm, b[0]));
          if (!next_hdr_ptr)
            {
              err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
              /* ... */
            }
          /* ... */
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          /* ... */
          hdr_len += sizeof (*esp);
          /* ... */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0))
            {
              /* ... */
              esp_fill_udp_hdr (sa0, udp, payload_len_total + hdr_len);
            }

          if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
            {
              /* ip6 outer header: ESP next-header is the inner protocol */
              if (lt == VNET_LINK_IP6)
                *next_hdr_ptr = IP_PROTOCOL_IPV6;
              else if (lt == VNET_LINK_IP4)
                *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
              else
                *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
              /* ... */
              len = payload_len_total + hdr_len - len;
              ip6->payload_length = clib_net_to_host_u16 (len);
              b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
            }
          else
            {
              /* ip4 outer header, same next-header selection */
              if (lt == VNET_LINK_IP6)
                *next_hdr_ptr = IP_PROTOCOL_IPV6;
              else if (lt == VNET_LINK_IP4)
                *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
              else
                *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
              /* ... */
              len = payload_len_total + hdr_len;
              /* ... */
            }

          /* ... */
          if (!is_tun)
            {
              /* next node comes from the dpo */
              /* ... */
            }
          else
            sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
          b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
        }
      else /* transport mode */
        {
          u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
          ip6_ext_header_t *ext_hdr;
          /* ... */
          next_hdr_ptr = esp_add_footer_and_icv (
            vm, &lb, esp_align, icv_sz, node, buffer_data_size,
            vlib_buffer_length_in_chain (vm, b[0]));
          if (!next_hdr_ptr)
            {
              err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
              /* ... */
            }
          /* ... */
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          /* ... */
          hdr_len += sizeof (*esp);
          /* ... */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0))
            {
              /* ... */
            }
          /* ... */
          ip_hdr = payload - hdr_len;
          /* ... L2 headers, when present, are moved in front of ESP ... */
          l2_hdr = payload - hdr_len;
          /* ... */
          if (lt == VNET_LINK_IP6)
            {
              /* ... */
              if (PREDICT_TRUE (NULL == ext_hdr))
                {
                  *next_hdr_ptr = ip6->protocol;
                  ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
                }
              else
                {
                  *next_hdr_ptr = ext_hdr->next_hdr;
                  ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
                }
              ip6->payload_length =
                clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
                                      sizeof (ip6_header_t));
            }
          else if (lt == VNET_LINK_IP4)
            {
              /* ... */
              *next_hdr_ptr = ip4->protocol;
              len = payload_len_total + hdr_len - l2_len;
              /* ... */
            }
          /* ... */
          udp_len = len - ip_len;
          /* ... */
          sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
        }

      /* ... */
      esp->seq = clib_net_to_host_u32 (sa0->seq);

      if (is_async)
        {
          /* no frame yet, or the frame for this op type is full */
          if (NULL == async_frames[async_op] ||
              vnet_crypto_async_frame_is_full (async_frames[async_op]))
            {
              async_frames[async_op] =
                vnet_crypto_async_get_frame (vm, async_op);
              /* ... */
            }

          esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0,
                                   b[0], esp, payload, payload_len, iv_sz,
                                   icv_sz, from[b - bufs], sync_next[0],
                                   hdr_len, async_next_node, lb);
        }
      else
        esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0,
                             sa0->seq_hi, payload, payload_len, iv_sz,
                             icv_sz, n_sync, b, lb, hdr_len, esp);

      /* ... */
      current_sa_packets += 1;
      current_sa_bytes += payload_len_total;
      /* ... trace ... */
      tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
      /* ... */
      if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
        {
          /* ... count the error and steer the buffer to the drop path ... */
        }
      /* ... */
      sync_bufs[n_sync] = b[0];
      /* ... */
    }

  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_packets,
                                   current_sa_bytes);

  /* ... process the queued sync crypto and integrity ops ... */
  esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
                           sync_nexts, ptd->chunks, drop_next);
  /* ... */
  esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
                           sync_nexts, ptd->chunks, drop_next);
  /* ... submit the async frames; recycle buffers if submission fails ... */
  esp_async_recycle_failed_submit (vm, *async_frame, node,
                                   ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
                                   n_sync, noop_bi, noop_nexts, drop_next);
  /* ... */
  return frame->n_vectors;
}
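The ESP_ENCRYPT_ERROR_SEQ_CYCLED path above exists because an SA's outbound sequence number is a hard resource: without ESN a 32-bit wrap would reuse (key, IV/sequence) pairs, so the packet must be dropped; with ESN a wrap carries into the high word kept in SA state. A simplified standalone sketch of that rule (VPP's esp_seq_advance() additionally accounts for the anti-replay setting):

#include <stdio.h>
#include <stdint.h>

typedef struct
{
  uint32_t seq, seq_hi;
  int use_esn;
} sa_t;

/* returns 1 when the SA must stop sending (sequence space exhausted) */
static int
seq_advance (sa_t *sa)
{
  if (sa->use_esn)
    {
      if (++sa->seq == 0)
        sa->seq_hi++; /* carry into the high word of the 64-bit ESN */
      return 0;
    }
  if (sa->seq == UINT32_MAX)
    return 1; /* cycled: drop the packet, the SA needs a rekey */
  sa->seq++;
  return 0;
}

int
main (void)
{
  sa_t sa = { UINT32_MAX - 1, 0, 0 };
  printf ("advance: %d then %d\n", seq_advance (&sa), seq_advance (&sa));
  return 0;
}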
  /* 4-wide unrolled trace loop in esp_encrypt_post_inline() */
  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
    { /* ... */ }
  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
    { /* ... */ }
  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
    { /* ... */ }
  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
    { /* ... */ }
  /* ... */
  vlib_node_increment_counter (vm, node->node_index,
                               ESP_ENCRYPT_ERROR_POST_RX_PKTS,
                               frame->n_vectors);
  /* ... */
  return frame->n_vectors;
VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
  },
};
VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
  .name = "esp4-encrypt-post",
  .vector_size = sizeof (u32),
  /* ... */
  .sibling_of = "esp4-encrypt",
};

VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  /* ... */
  .sibling_of = "esp4-encrypt",
};

VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
  .name = "esp6-encrypt-post",
  .vector_size = sizeof (u32),
  /* ... */
  .sibling_of = "esp4-encrypt",
};
VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
  .name = "esp4-encrypt-tun-post",
  .vector_size = sizeof (u32),
  /* ... */
  .sibling_of = "esp4-encrypt-tun",
};
VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
  .name = "esp6-encrypt-tun-post",
  .vector_size = sizeof (u32),
  /* ... */
  .sibling_of = "esp-mpls-encrypt-tun",
};
VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
  .name = "esp-mpls-encrypt-tun",
  .vector_size = sizeof (u32),
  /* ... */
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
  .name = "esp-mpls-encrypt-tun-post",
  .vector_size = sizeof (u32),
  /* ... */
  .sibling_of = "esp-mpls-encrypt-tun",
};
1400 "Outbound ESP packets received",
1439 return frame->n_vectors;
VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) = {
  .name = "esp4-no-crypto",
  .vector_size = sizeof (u32),
  /* ... */
};

VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) = {
  .name = "esp6-no-crypto",
  .vector_size = sizeof (u32),
  /* ... */
};
#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_encrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  /* one handoff frame queue per encrypt node */
  im->esp4_enc_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
  im->esp6_enc_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
  im->esp4_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
  im->esp6_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
  im->esp_mpls_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);

  return 0;
}

VLIB_INIT_FUNCTION (esp_encrypt_init);

#endif