FD.io VPP
v21.10.1-2-g0a485f517
Vector Packet Processing
}) ip4_and_esp_header_t;
}) ip4_and_udp_and_esp_header_t;
}) ip6_and_esp_header_t;
} __clib_packed esp_ctr_nonce_t;
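These closing lines belong to packed typedefs that prepend an outer IPv4, IPv4+UDP, or IPv6 header to the ESP header, plus the packed CTR nonce. A minimal sketch of the IPv4 variant, matching the typedef shown in the definitions below; the UDP-encapsulated layout with a udp_header_t between the two headers is an assumption for illustration:

/* Outer IPv4 header immediately followed by ESP, packed to match the
 * on-the-wire encapsulation (see the typedef in the definitions below). */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  esp_header_t esp;
}) ip4_and_esp_header_t;

/* Assumed layout of the UDP-encapsulated variant. */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  udp_header_t udp;
  esp_header_t esp;
}) ip4_and_udp_and_esp_header_t;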
#define ESP_SEQ_MAX (4294967295UL)
#define ESP_MAX_BLOCK_SIZE (16)
#define ESP_MAX_IV_SIZE (16)
#define ESP_MAX_ICV_SIZE (32)
if (ipsec_sa_is_set_USE_ESN (sa))
    aad->data[1] = (u32) clib_host_to_net_u32 (seq_hi);
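Combined with the esp_aead_t definition further down (u32 data[3]: SPI, seq-hi, seq-low with ESN, otherwise SPI, seq-low) and the esp_aad_fill() declaration, the fragment above suggests the following shape for the AAD fill helper. This is a reconstruction for illustration, not the verbatim source; the returned AAD lengths of 12 and 8 bytes are an assumption:

/* Sketch: lay out the AES-GCM additional authentication data. */
static u16
example_aad_fill (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa,
                  u32 seq_hi)
{
  esp_aead_t *aad = (esp_aead_t *) data;

  aad->data[0] = esp->spi;              /* SPI, already in network byte order */

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      /* SPI, seq-hi, seq-low */
      aad->data[1] = (u32) clib_host_to_net_u32 (seq_hi);
      aad->data[2] = esp->seq;
      return 12;                        /* assumed: 3 x u32 of AAD */
    }

  /* SPI, seq-low */
  aad->data[1] = esp->seq;
  return 8;                             /* assumed: 2 x u32 of AAD */
}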
u32 n_drop = f->n_elts;
u32 *bi = f->buffer_indices;
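The two lines above come from the helper that recycles a failed async crypto submission (its full signature appears in the definitions below). A sketch of how that recycling plausibly proceeds, inferred from the declarations of esp_set_next_index() and vnet_crypto_async_reset_frame(); the exact bookkeeping of the real helper is not shown:

/* Sketch: send every buffer of a failed crypto frame to the drop path,
 * then return the frame to an empty state. */
static_always_inline u32
example_recycle_failed_submit (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
                               vlib_node_runtime_t *node, u32 err, u16 index,
                               u32 *from, u16 *nexts, u16 drop_next_index)
{
  u32 n_drop = f->n_elts;
  u32 *bi = f->buffer_indices;
  u32 i;

  for (i = 0; i < n_drop; i++)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[i]);
      from[index + i] = bi[i];
      esp_set_next_index (b, node, err, (u16) (index + i), nexts,
                          drop_next_index);
    }

  vnet_crypto_async_reset_frame (f);
  return n_drop;                        /* number of buffers dropped */
}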
226 "Custom meta-data too large for vnet_buffer_opaque_t");
228 #define esp_post_data(b) \
229 ((esp_post_data_t *)((u8 *)((b)->opaque) \
230 + STRUCT_OFFSET_OF (vnet_buffer_opaque_t, unused)))
234 "Custom meta-data too large for vnet_buffer_opaque2_t");
236 #define esp_post_data2(b) \
237 ((esp_decrypt_packet_data2_t *)((u8 *)((b)->opaque2) \
238 + STRUCT_OFFSET_OF (vnet_buffer_opaque2_t, unused)))
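These macros overlay per-packet post-processing state on the unused area of the buffer opaques; the STATIC_ASSERTs above guarantee the structures fit. A minimal usage sketch of the store/load pattern; the member name next_index is hypothetical, used only for illustration:

/* Producer side: stash the post-crypto next node in the buffer. */
static_always_inline void
example_store_post_next (vlib_buffer_t *b, u16 next)
{
  esp_post_data_t *pd = esp_post_data (b);
  pd->next_index = next;                /* hypothetical member */
}

/* Consumer side: the post node reads the same slot back. */
static_always_inline u16
example_load_post_next (vlib_buffer_t *b)
{
  return esp_post_data (b)->next_index; /* hypothetical member */
}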
u32 data[3]
for GCM: when using ESN it is SPI, seq-hi, seq-low; otherwise SPI, seq-low
typedef CLIB_PACKED(struct { ip4_header_t ip4; esp_header_t esp; }) ip4_and_esp_header_t
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
u32 esp_mpls_tun_post_next
vlib_node_runtime_t * node
esp_decrypt_packet_data_t decrypt_data
static void esp_set_next_index(vlib_buffer_t *b, vlib_node_runtime_t *node, u32 err, u16 index, u16 *nexts, u16 drop_next)
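From its declaration and the error-code blurb below, esp_set_next_index() appears to steer a failed buffer to the drop next index and attach the node error to it. A sketch of that behaviour, offered as an assumption rather than the verbatim body:

static_always_inline void
example_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node, u32 err,
                        u16 index, u16 *nexts, u16 drop_next)
{
  nexts[index] = drop_next;             /* steer this packet to the drop next */
  b->error = node->errors[err];         /* record why it is being dropped */
}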
vlib_main_t * vm
STATIC_ASSERT(sizeof(esp_post_data_t)<=STRUCT_SIZE_OF(vnet_buffer_opaque_t, unused), "Custom meta-data too large for vnet_buffer_opaque_t")
enum ipsec_sad_flags_t_ ipsec_sa_flags_t
esp_async_post_next_t esp_encrypt_async_next
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static_always_inline void vnet_crypto_async_reset_frame(vnet_crypto_async_frame_t *f)
struct esp_aead_t_ esp_aead_t
AES GCM Additional Authentication data.
STATIC_ASSERT_SIZEOF(esp_ctr_nonce_t, 16)
STATIC_ASSERT_OFFSET_OF(esp_decrypt_packet_data_t, seq, sizeof(u64))
static u16 esp_aad_fill(u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa, u32 seq_hi)
u16 nexts[VLIB_FRAME_SIZE]
u8 * format_esp_header(u8 *s, va_list *args)
static u32 esp_async_recycle_failed_submit(vlib_main_t *vm, vnet_crypto_async_frame_t *f, vlib_node_runtime_t *node, u32 err, u16 index, u32 *from, u16 *nexts, u16 drop_next_index)
The post data structure for esp_encrypt/decrypt_inline to write to the vlib_buffer_t opaque unused field.
esp_async_post_next_t esp_decrypt_async_next
#define STRUCT_SIZE_OF(t, f)
static int esp_seq_advance(ipsec_sa_t *sa)
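Given ESP_SEQ_MAX above and the USE_ESN flag, esp_seq_advance() presumably bumps the outbound sequence number, carrying into the high 32 bits when extended sequence numbers are enabled. A sketch under those assumptions; the sa->seq and sa->seq_hi member names are assumptions, and the real helper's anti-replay interaction is not shown:

static int
example_seq_advance (ipsec_sa_t *sa)
{
  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      if (sa->seq == ESP_SEQ_MAX)
        sa->seq_hi++;                   /* carry into the high 32 bits */
      sa->seq++;
    }
  else
    {
      if (sa->seq == ESP_SEQ_MAX)
        return 1;                       /* 32-bit sequence space exhausted */
      sa->seq++;
    }
  return 0;
}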
VLIB buffer representation.