#define foreach_ipsec_input_error                \
  _ (RX_PKTS, "IPSEC pkts received")             \
  _ (RX_MATCH_PKTS, "IPSEC pkts matched")

typedef enum
{
#define _(sym,str) IPSEC_INPUT_ERROR_##sym,
  foreach_ipsec_input_error
#undef _
    IPSEC_INPUT_N_ERROR,
} ipsec_input_error_t;

static char *ipsec_input_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_input_error
#undef _
};

/* excerpt from format_ipsec_input_trace(), the per-packet trace formatter */
  s = format (s, "%U: sa_id %u spd %u policy %d spi %u (0x%08x) seq %u",
              format_ip_protocol, t->proto, t->sa_id, t->spd,
              t->policy_index, t->spi, t->spi, t->seq);
/* excerpt from ipsec_input_protect_policy_match(): match an inbound IPv4
 * packet (sa = source, da = destination, both in host byte order) against
 * a PROTECT policy p whose SA is s */
      if (ipsec_sa_is_set_IS_TUNNEL (s))
        {
          /* tunnel SA: require an exact match on the tunnel endpoints */
          if (da != clib_net_to_host_u32 (s->tunnel_dst_addr.ip4.as_u32))
            continue;
          if (sa != clib_net_to_host_u32 (s->tunnel_src_addr.ip4.as_u32))
            continue;
          return p;
        }

      /* otherwise check the policy's local/remote address ranges */
      if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
        continue;
      if (da > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
        continue;
      if (sa < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
        continue;
      if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
        continue;

      return p;

/* the IPv6 variant, ipsec_input_ip6_protect_policy_match(), applies the same
 * logic, comparing the tunnel endpoints with ip6_address_is_equal() */
      if (ipsec_sa_is_set_IS_TUNNEL (s))
        {
          /* ... exact match on s->tunnel_src_addr.ip6 / s->tunnel_dst_addr.ip6 ... */
        }
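/* Illustration only (not part of ipsec_input.c): the transport-mode checks
 * above reduce to an inclusive range test on addresses converted to host
 * byte order. A hypothetical helper expressing the same predicate: */
static inline int
ip4_addr_in_range (u32 addr, const ip4_address_t * start,
                   const ip4_address_t * stop)
{
  return addr >= clib_net_to_host_u32 (start->as_u32) &&
         addr <= clib_net_to_host_u32 (stop->as_u32);
}
/* with it, the laddr test is equivalent to:
 *   if (!ip4_addr_in_range (da, &p->laddr.start.ip4, &p->laddr.stop.ip4))
 *     continue;                                                            */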
VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, next_index, *to_next, thread_index;
  ipsec_main_t *im = &ipsec_main;
  u32 ipsec_unprocessed = 0;
  u32 ipsec_matched = 0;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  thread_index = vm->thread_index;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          /* per-packet locals (bi0, next0, b0, ip0, esp0, ah0, spd0, p0,
           * pi0, has_space0, ...) elided */

          bi0 = to_next[0] = from[0];
          /* ... advance from/to_next, decrement n_left_from/n_left_to_next ... */

          b0 = vlib_get_buffer (vm, bi0);
          b0->flags |= VNET_BUFFER_F_IS_IP4;
          b0->flags &= ~VNET_BUFFER_F_IS_IP6;
          /* ... spd0 = pool_elt_at_index (im->spds, c0->spd_index), then: */
          ip0 = vlib_buffer_get_current (b0);

          if (PREDICT_TRUE (ip0->protocol == IP_PROTOCOL_IPSEC_ESP
                            || ip0->protocol == IP_PROTOCOL_UDP))
            {
              esp0 = (esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
              /* debug log of the received packet */
              clib_warning
                ("packet received from %U to %U spi %u size %u spd_id %u",
                 format_ip4_address, ip0->src_address.as_u8,
                 format_ip4_address, ip0->dst_address.as_u8,
                 clib_net_to_host_u32 (esp0->spi),
                 clib_net_to_host_u16 (ip0->length), spd0->id);

              /* ... match the inbound PROTECT policy; on a hit, account the
               * packet against the policy and send it to esp4-decrypt ... */
              ipsec_matched += 1;
              vlib_increment_combined_counter
                (&ipsec_spd_policy_counters,
                 thread_index, pi0, 1,
                 clib_net_to_host_u16 (ip0->length));

              if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
                {
                  ipsec_input_trace_t *tr =
                    vlib_add_trace (vm, node, b0, sizeof (*tr));
                  tr->proto = ip0->protocol;
                  tr->spi = has_space0 ? clib_net_to_host_u32 (esp0->spi) : ~0;
                  tr->seq = has_space0 ? clib_net_to_host_u32 (esp0->seq) : ~0;
                }
            }
          else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
            {
              /* same flow for AH: match policy, count, send to ah4-decrypt */
              vlib_increment_combined_counter
                (&ipsec_spd_policy_counters,
                 thread_index, pi0, 1,
                 clib_net_to_host_u16 (ip0->length));
              /* trace, as above */
              tr->proto = ip0->protocol;
              tr->spi = has_space0 ? clib_net_to_host_u32 (ah0->spi) : ~0;
              tr->seq = has_space0 ? clib_net_to_host_u32 (ah0->seq_no) : ~0;
            }
          else
            {
              ipsec_unprocessed += 1;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, bi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
                               IPSEC_INPUT_ERROR_RX_PKTS,
                               from_frame->n_vectors - ipsec_unprocessed);
  vlib_node_increment_counter (vm, ipsec4_input_node.index,
                               IPSEC_INPUT_ERROR_RX_MATCH_PKTS,
                               ipsec_matched);

  return from_frame->n_vectors;
}
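/* Context for the node registrations that follow: the next-node set is
 * declared elsewhere in this file with the same X-macro trick as the error
 * counters. Sketch only; the individual _(SYMBOL, "node-name") entries of
 * foreach_ipsec_input_next are not reproduced here. */
typedef enum
{
#define _(s, n) IPSEC_INPUT_NEXT_##s,
  foreach_ipsec_input_next
#undef _
    IPSEC_INPUT_N_NEXT,
} ipsec_input_next_t;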
VLIB_REGISTER_NODE (ipsec4_input_node) = {
  .name = "ipsec4-input-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_input_trace,
  /* ... error string table and counts elided ... */
  .n_next_nodes = IPSEC_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
    foreach_ipsec_input_next
#undef _
  },
};

VLIB_NODE_FN (ipsec6_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, next_index, *to_next, thread_index;
  ipsec_main_t *im = &ipsec_main;
  u32 ipsec_unprocessed = 0;
  u32 ipsec_matched = 0;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  thread_index = vm->thread_index;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          /* per-packet locals elided; the flow mirrors the IPv4 node */
          u32 header_size = sizeof (ip0[0]);

          bi0 = to_next[0] = from[0];
          /* ... advance from/to_next, decrement n_left_from/n_left_to_next ... */

          b0 = vlib_get_buffer (vm, bi0);
          b0->flags |= VNET_BUFFER_F_IS_IP6;
          b0->flags &= ~VNET_BUFFER_F_IS_IP4;
          /* ... fetch the SPD, locate esp0/ah0 just past the ip6 header,
           * debug log "packet received from %U to %U spi %u size %u spd_id %u" ... */

          if (PREDICT_TRUE (ip0->protocol == IP_PROTOCOL_IPSEC_ESP))
            {
              /* on an ESP policy match: count, then send to esp6-decrypt */
              vlib_increment_combined_counter
                (&ipsec_spd_policy_counters, thread_index, pi0, 1,
                 clib_net_to_host_u16 (ip0->payload_length) + header_size);
            }
          else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
            {
              /* on an AH policy match: count, then send to ah6-decrypt */
              vlib_increment_combined_counter
                (&ipsec_spd_policy_counters, thread_index, pi0, 1,
                 clib_net_to_host_u16 (ip0->payload_length) + header_size);
            }
          else
            {
              ipsec_unprocessed += 1;
            }

          if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
            {
              ipsec_input_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->spi = clib_net_to_host_u32 (esp0->spi);
              tr->seq = clib_net_to_host_u32 (esp0->seq);
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, ipsec6_input_node.index,
                               IPSEC_INPUT_ERROR_RX_PKTS,
                               from_frame->n_vectors - ipsec_unprocessed);
  vlib_node_increment_counter (vm, ipsec6_input_node.index,
                               IPSEC_INPUT_ERROR_RX_MATCH_PKTS,
                               ipsec_matched);

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (ipsec6_input_node) = {
  .name = "ipsec6-input-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_input_trace,
  /* ... error string table and counts elided ... */
  .n_next_nodes = IPSEC_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
    foreach_ipsec_input_next
#undef _
  },
};
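/* Usage sketch (not from this file; the arc names and the call below are an
 * assumption about how the feature nodes get attached elsewhere in VPP):
 * binding an SPD to an interface enables these nodes on the unicast feature
 * arcs, roughly: */
  vnet_feature_enable_disable ("ip4-unicast", "ipsec4-input-feature",
                               sw_if_index, 1 /* enable */, 0, 0);
  vnet_feature_enable_disable ("ip6-unicast", "ipsec6-input-feature",
                               sw_if_index, 1 /* enable */, 0, 0);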