#define foreach_ah_encrypt_next                                               \
  _ (DROP, "error-drop")                                                      \
  _ (HANDOFF, "handoff")                                                      \
  _ (INTERFACE_OUTPUT, "interface-output")

/* Expanded inside the ah_encrypt_next_t enum definition. */
#define _(v, s) AH_ENCRYPT_NEXT_##v,

#define foreach_ah_encrypt_error                                              \
  _ (RX_PKTS, "AH pkts received")                                             \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
  _ (SEQ_CYCLED, "sequence number cycled")

/* Expanded inside the ah_encrypt_error_t enum definition. */
#define _(sym, str) AH_ENCRYPT_ERROR_##sym,

/* Expanded to build ah_encrypt_error_strings[]. */
#define _(sym, string) string,

/* format_ah_encrypt_trace () */
s = format (s, "ah: sa-index %d spi %u (0x%08x) seq %u:%u integrity %U",
	    ...);
99 if (op->
status != VNET_CRYPTO_OP_STATUS_COMPLETED)
102 b[bi]->
error = node->
errors[AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
103 nexts[bi] = AH_ENCRYPT_NEXT_DROP;
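/* Context for the check above: a sketch of the whole handler, assuming the
   usual VPP batching pattern (integrity ops are accumulated per thread,
   submitted in one call, and failures are mapped back to buffers via
   op->user_data). */
static_always_inline void
ah_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
{
  u32 n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  /* vnet_crypto_process_ops () returns the number of completed ops. */
  u32 n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = AH_ENCRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}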
/* ah_encrypt_inline () locals. */
u32 n_left, *from, thread_index;
...
ip4_and_ah_header_t *ih0, *oh0 = 0;
ip6_and_ah_header_t *ih6_0, *oh6_0 = 0;
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;

/* Outer-header templates; both fix the protocol to AH. */
const static ip4_header_t ip4_hdr_template = {
  /* ... version/header-length fields elided ... */
  .protocol = IP_PROTOCOL_IPSEC_AH,
};

const static ip6_header_t ip6_hdr_template = {
  /* ... version/traffic-class fields elided ... */
  .protocol = IP_PROTOCOL_IPSEC_AH,
};
if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
  {
    /* On an SA change, flush the accumulated packet/byte counters onto
       the previous SA and reset them. */
    if (current_sa_index != ~0)
      vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				       current_sa_index, current_sa_pkts,
				       current_sa_bytes);
    current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
    sa0 = pool_elt_at_index (im->sad, current_sa_index);
    current_sa_bytes = current_sa_pkts = 0;
  }

/* Default disposition; overwritten on success. */
next[0] = AH_ENCRYPT_NEXT_DROP;

/* SAs are pinned to one worker; foreign packets are handed off. */
if (PREDICT_TRUE (thread_index != sa0->thread_index))
  {
    next[0] = AH_ENCRYPT_NEXT_HANDOFF;
    goto next;
  }

/* A cycled sequence number must never be reused. */
if (PREDICT_FALSE (esp_seq_advance (sa0)))
  {
    b[0]->error = node->errors[AH_ENCRYPT_ERROR_SEQ_CYCLED];
    goto next;
  }

current_sa_pkts += 1;
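/* Before the handoff check, an unowned SA gets claimed for a thread. A
   sketch, assuming the standard VPP claim-by-CAS pattern built from
   clib_atomic_cmp_and_swap () and ipsec_sa_assign_thread (): */
if (PREDICT_FALSE (sa0->thread_index == ~0))
  {
    /* First packet on this SA; another worker may race us, so claim it
       with a compare-and-swap rather than a plain store. */
    clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
			      ipsec_sa_assign_thread (thread_index));
  }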
212 pd->
ttl = ih0->ip4.ttl;
213 pd->
tos = ih0->ip4.tos;
218 adv = -
sizeof (ip6_and_ah_header_t);
220 adv = -
sizeof (ip4_and_ah_header_t);
236 u8 *l2_hdr_out = l2_hdr_in + adv - icv_size;
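/* The adv/icv_size arithmetic depends on the ICV padding rule. A sketch of
   the helper, assuming it matches the ah_calc_icv_padding_len () declared
   in the ipsec headers: RFC 4302 pads AH to a 32-bit boundary for IPv4 and
   a 64-bit boundary for IPv6. */
static u8
ah_calc_icv_padding_len (u8 icv_size, int is_ipv6)
{
  const u8 req_multiple = 4 + 4 * is_ipv6;	/* 4 bytes (v4), 8 bytes (v6) */
  const u8 total_size = sizeof (ah_header_t) + icv_size;
  return (req_multiple - total_size % req_multiple) % req_multiple;
}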
/* IPv6: build the outgoing IP6 + AH header. */
ih6_0 = (ip6_and_ah_header_t *) ih0;
...
oh6_0->ip6.ip_version_traffic_class_and_flow_label =
  ih6_0->ip6.ip_version_traffic_class_and_flow_label;

if (ipsec_sa_is_set_IS_TUNNEL (sa0))
  next_hdr_type = IP_PROTOCOL_IPV6;	/* whole inner IPv6 packet follows */
else
  next_hdr_type = ih6_0->ip6.protocol;	/* transport: keep the L4 protocol */

oh6_0->ah.reserved = 0;
oh6_0->ah.nexthdr = next_hdr_type;
oh6_0->ah.spi = clib_net_to_host_u32 (sa0->spi);
oh6_0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
oh6_0->ip6.payload_length =
  clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
			sizeof (ip6_header_t));
/* AH length is encoded in 32-bit words, minus 2 (RFC 4302). */
oh6_0->ah.hdrlen = (sizeof (ah_header_t) + icv_size + padding_len) / 4 - 2;
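/* Worked example of the hdrlen encoding: with HMAC-SHA1-96 (icv_size = 12),
   the 12-byte fixed AH header and no padding, hdrlen is
   (12 + 12 + 0) / 4 - 2 = 4, the classic AH payload length for a 96-bit
   ICV. */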
/* IPv4: start from a zeroed IP4 + AH header. */
clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));

if (ipsec_sa_is_set_IS_TUNNEL (sa0))
  next_hdr_type = IP_PROTOCOL_IP_IN_IP;	/* whole inner IPv4 packet follows */
else
  next_hdr_type = ih0->ip4.protocol;	/* transport: keep the L4 protocol */

oh0->ah.spi = clib_net_to_host_u32 (sa0->spi);
oh0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
oh0->ah.nexthdr = next_hdr_type;
oh0->ah.hdrlen = (sizeof (ah_header_t) + icv_size + padding_len) / 4 - 2;
/* Tunnel mode: stamp the SA's outer addresses. */
if (PREDICT_TRUE (!is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
		  !ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)))
  {
    ...
  }
else if (is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
	 ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
  {
    /* src and dst are adjacent in both headers; copy both in one call. */
    clib_memcpy_fast (&oh6_0->ip6.src_address, &sa0->ip6_hdr.src_address,
		      sizeof (ip6_address_t) * 2);
  }
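/* In both tunnel branches the packet is then steered by the SA's
   pre-resolved DPO; a sketch, assuming the standard VPP DPO hand-off via
   dpoi_next_node / dpoi_index: */
next[0] = sa0->dpo.dpoi_next_node;
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;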
if (ipsec_sa_is_set_USE_ESN (sa0))
  {
    u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);

    /* The high 32 bits of the extended sequence number are appended to
       the integrity input but never sent on the wire (RFC 4302). */
    clib_memcpy (op->src + b[0]->current_length, &seq_hi, sizeof (seq_hi));
    op->len += sizeof (seq_hi);
  }

/* Transport mode packets go straight back out the interface. */
if (!ipsec_sa_is_set_IS_TUNNEL (sa0))
  {
    next[0] = AH_ENCRYPT_NEXT_INTERFACE_OUTPUT;
    vlib_buffer_advance (b[0], adv);
  }
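/* The op referenced by the ESN block is set up just before it. A sketch,
   assuming the usual per-thread batching into ptd->integ_ops and the
   standard vnet_crypto_op_t fields: */
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
vnet_crypto_op_init (op, sa0->integ_op_id);
op->src = vlib_buffer_get_current (b[0]);
op->len = b[0]->current_length;
/* The ICV field itself must be zero while the MAC is computed. */
op->digest = vlib_buffer_get_current (b[0]) + ip_hdr_size +
  sizeof (ah_header_t);
clib_memset (op->digest, 0, icv_size);
op->digest_len = icv_size;
op->key_index = sa0->integ_key_index;
op->user_data = b - bufs;	/* lets ah_process_ops () map failures back */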
/* End of the first pass: account everything processed in this frame. */
vlib_node_increment_counter (vm, node->node_index,
			     AH_ENCRYPT_ERROR_RX_PKTS, n_left);
vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				 current_sa_index, current_sa_pkts,
				 current_sa_bytes);
382 oh6_0->ip6.ip_version_traffic_class_and_flow_label =
388 oh0->ip4.ttl = pd->
ttl;
389 oh0->ip4.tos = pd->
tos;
VLIB_REGISTER_NODE (ah4_encrypt_node) = {
  .name = "ah4-encrypt",
  .vector_size = sizeof (u32),
  ...
  .next_nodes = {
    [AH_ENCRYPT_NEXT_DROP] = "ip4-drop",
    [AH_ENCRYPT_NEXT_HANDOFF] = "ah4-encrypt-handoff",
    [AH_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
  },
};
VLIB_REGISTER_NODE (ah6_encrypt_node) = {
  .name = "ah6-encrypt",
  .vector_size = sizeof (u32),
  ...
  .next_nodes = {
    [AH_ENCRYPT_NEXT_DROP] = "ip6-drop",
    [AH_ENCRYPT_NEXT_HANDOFF] = "ah6-encrypt-handoff",
    [AH_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
  },
};