From format_ip_frag_trace(), the per-packet trace line:

    /* trace-field arguments elided in this excerpt */
    s = format (s, "IPv%s offset: %u mtu: %u fragments: %u", ...);
From ip4_frag_do_fragment(), sizing, validation, and per-fragment header fixup:

    u16 mtu, ptr, len, max, rem,
        offset, ip_frag_id, ip_frag_offset;
    ...
    /* remaining payload bytes, and the largest 8-octet-aligned payload
       that fits in the MTU after the IPv4 header */
    rem = clib_net_to_host_u16 (ip4->length) - sizeof (*ip4);
    max = (mtu - sizeof (*ip4) - vnet_buffer (p)->ip_frag.header_offset) & ~0x7;
    ...
    *error = IP_FRAG_ERROR_MALFORMED;         /* stated length exceeds the buffer */
    ...
    if (mtu < sizeof (*ip4)) {
      *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
      ...
    }
    ...
    *error = IP_FRAG_ERROR_DONT_FRAGMENT_SET; /* DF bit is set, refuse to fragment */
    ...
    /* every fragment but the last carries max bytes; the last carries the rest */
    len = (rem > (mtu - sizeof (*ip4) - vnet_buffer (p)->ip_frag.header_offset)) ? max : rem;
    ...
    *error = IP_FRAG_ERROR_MEMORY;            /* fragment buffer allocation failed */
    ...
    /* copy this fragment's slice of the original payload */
    clib_memcpy (..., packet + offset + sizeof (*fip4) + ptr, len);
    ...
    /* rewrite the fragment's IPv4 header: id, offset (in 8-octet units),
       MF flag on all but the final fragment, and the new total length */
    fip4->fragment_id = ip_frag_id;
    fip4->flags_and_fragment_offset = clib_host_to_net_u16 ((ptr >> 3) + ip_frag_offset);
    fip4->flags_and_fragment_offset |= clib_host_to_net_u16 (((len != rem) || more) << 13);
    ...
    fip4->length = clib_host_to_net_u16 (len + sizeof (*fip4));
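The last four lines are the heart of the fixup: the running payload pointer ptr becomes a 13-bit offset in 8-octet units, and More Fragments (bit 13 of the 16-bit field) is set on every fragment except the last. A minimal standalone sketch of the same packing; the helper name is hypothetical, not part of ip_frag.c:

    #include <stdint.h>

    /* Hypothetical helper, not from ip_frag.c: pack an IPv4
       flags_and_fragment_offset field the way ip4_frag_do_fragment() does.
       byte_offset must be a multiple of 8; the result is in host byte order
       and still needs clib_host_to_net_u16() before hitting the wire. */
    static uint16_t
    ip4_pack_frag_field (uint16_t byte_offset, int more_fragments)
    {
      uint16_t f = byte_offset >> 3;   /* low 13 bits: offset in 8-octet units */
      if (more_fragments)
        f |= 1 << 13;                  /* MF, as in ((len != rem) || more) << 13 */
      return f;
    }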
From ip4_frag(), the node dispatch loop:

    u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
    u32 frag_sent = 0, small_packets = 0;
    ...
    while (n_left_from > 0) {
      ...
      while (n_left_from > 0 && n_left_to_next > 0) {
        u32 pi0, *frag_from, frag_left;
        ...
        error0 = IP_FRAG_ERROR_NONE;
        ...
        if (error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET) {
          /* DF set: rewrite the packet as an ICMP destination-unreachable */
          icmp4_error_set_vnet_buffer (...,
              ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
              ...);
        }
        ...
        if (error0 == IP_FRAG_ERROR_NONE) {
          ...
          small_packets += (vec_len (buffer) == 1);
        }
        ...
        /* enqueue every fragment produced for this packet */
        while (frag_left > 0) {
          while (frag_left > 0 && n_left_to_next > 0) {
            i = to_next[0] = frag_from[0];
            ...
            vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                             to_next, n_left_to_next, i, ...);
          }
        }
      }
    }
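This is the canonical vlib dispatch shape seen across VPP nodes: reserve slots in the outgoing frame for next_index, copy buffer indices across, and let vlib_validate_buffer_enqueue_x1() repair the frame whenever a buffer's actual next node differs. A skeleton of the pattern with the per-buffer work elided; names follow the vlib API indexed at the end of this page:

    /* Skeleton only: the single-buffer vlib loop that ip4_frag() and
       ip6_frag() both follow. next0 selection and the real work are elided. */
    while (n_left_from > 0)
      {
        vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
        while (n_left_from > 0 && n_left_to_next > 0)
          {
            u32 bi0 = from[0];
            u32 next0 = next_index;
            from += 1;  n_left_from -= 1;
            to_next[0] = bi0;
            to_next += 1;  n_left_to_next -= 1;
            /* ... per-buffer work: fragment the packet, set error0/next0 ... */
            vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                             to_next, n_left_to_next,
                                             bi0, next0);
          }
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      }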
From ip6_frag_do_fragment(), locating or splicing in the Fragment extension header:

    ip6_frag_hdr_t *frag_hdr;
    u8 *payload, *next_header;
    ...
    payload = (u8 *) (ip6_hdr + 1);
    ...
    /* step over the extension headers that must precede fragmentation
       (note: RFC 8200 sizes an extension header as (hdr_ext_len + 1) * 8) */
    if (*next_header == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) {
      next_header = payload;
      payload += payload[1] * 8;
    }
    if (*next_header == IP_PROTOCOL_IP6_DESTINATION_OPTIONS) {
      next_header = payload;
      payload += payload[1] * 8;
    }
    if (*next_header == IP_PROTOCOL_IPV6_ROUTE) {
      next_header = payload;
      payload += payload[1] * 8;
    }
    ...
    if (*next_header == IP_PROTOCOL_IPV6_FRAGMENTATION) {
      /* a Fragment header is already present: reuse it */
      frag_hdr = (ip6_frag_hdr_t *) payload;
      ...
    } else {
      /* open an 8-byte hole before the payload and splice in a new header */
      u8 nh = *next_header;
      *next_header = IP_PROTOCOL_IPV6_FRAGMENTATION;
      ...
      memmove (start, start + sizeof (*frag_hdr),
               payload - (start + sizeof (*frag_hdr)));
      frag_hdr = (ip6_frag_hdr_t *) (payload - sizeof (*frag_hdr));
      ...
      frag_hdr->next_hdr = nh;
    }
    ...
    payload = (u8 *) (frag_hdr + 1);
    ...
    if (max_payload < 8) {
      *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER; /* no room for one 8-octet unit */
      ...
    }
    ...
    /* non-final fragment payloads must be multiples of 8 octets */
    u16 len = (rem > max_payload) ? (max_payload & ~0x7) : rem;
    ...
    *error = IP_FRAG_ERROR_MEMORY;                 /* fragment buffer allocation failed */
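The 8-octet granularity above comes straight from the Fragment header format: offsets are carried in 8-octet units, so anything under 8 bytes of room is unfragmentable and non-final payloads are masked with ~0x7. A standalone sketch of the layout and of the packing that ip6_frag_hdr_offset_and_more() performs; the struct and helper names here are illustrative, not VPP's:

    #include <stdint.h>

    /* Illustrative layout per RFC 8200 section 4.5 (names are not VPP's).
       All multi-byte fields are big-endian on the wire. */
    typedef struct {
      uint8_t  next_hdr;        /* protocol of the fragmentable part */
      uint8_t  rsv;             /* reserved, zero */
      uint16_t offset_and_more; /* bits 15..3: offset / 8; bit 0: M flag */
      uint32_t identification;  /* same value in every fragment */
    } ip6_frag_hdr_sketch_t;

    static uint16_t
    ip6_pack_offset_and_more (uint16_t byte_offset, int more)
    {
      /* the offset sits in the top 13 bits in 8-octet units; M is bit 0 */
      return (uint16_t) ((byte_offset & ~0x7) | (more ? 1 : 0));
    }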
From ip6_frag(), the node dispatch loop, same shape as ip4_frag() (IPv6 has no DF bit, so there is no ICMP rewrite branch here):

    u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
    u32 frag_sent = 0, small_packets = 0;
    ...
    while (n_left_from > 0) {
      ...
      while (n_left_from > 0 && n_left_to_next > 0) {
        u32 pi0, *frag_from, frag_left;
        ...
        error0 = IP_FRAG_ERROR_NONE;
        ...
        small_packets += (vec_len (buffer) == 1);
        ...
        while (frag_left > 0) {
          while (frag_left > 0 && n_left_to_next > 0) {
            i = to_next[0] = frag_from[0];
            ...
            vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                             to_next, n_left_to_next, i, ...);
          }
        }
      }
    }
From the error-string table and node registrations:

    #define _(sym,string) string,  /* expands foreach_ip_frag_error into ip4_frag_error_strings[] */
    ...
    /* in VLIB_REGISTER_NODE (ip4_frag_node) */
    .vector_size = sizeof (u32),
    ...
    /* in VLIB_REGISTER_NODE (ip6_frag_node) */
    .vector_size = sizeof (u32),
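The _(sym,string) define is the usual VPP idiom for expanding a foreach_* list into parallel enum and string tables. A minimal self-contained sketch of the pattern; the list name and entries below are placeholders, not the real foreach_ip_frag_error contents:

    /* Placeholder error list; the real entries live in foreach_ip_frag_error. */
    #define foreach_demo_frag_error          \
      _(NONE, "packet fragmented")           \
      _(MEMORY, "could not allocate buffer")

    typedef enum {
    #define _(sym,string) DEMO_FRAG_ERROR_##sym,
      foreach_demo_frag_error
    #undef _
      DEMO_FRAG_N_ERROR,
    } demo_frag_error_t;

    static char *demo_frag_error_strings[] = {
    #define _(sym,string) string,
      foreach_demo_frag_error
    #undef _
    };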
Referenced symbols:

void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
always_inline void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
#define foreach_ip_frag_error
void ip_frag_set_vnet_buffer(vlib_buffer_t *b, u16 offset, u16 mtu, u8 next_index, u8 flags)
#define IP6_FRAG_NODE_NAME
always_inline int ip4_get_fragment_offset(ip4_header_t *i)
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
always_inline int ip4_is_fragment(ip4_header_t *i)
always_inline void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
static u8 * format_ip_frag_trace(u8 *s, va_list *args)
#define vec_reset_length(v)
Reset vector length to zero; NULL-pointer tolerant.
static u32 running_fragment_id
static void ip6_frag_do_fragment(vlib_main_t *vm, u32 pi, u32 **buffer, ip_frag_error_t *error)
static uword ip4_frag(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
always_inline void * vlib_frame_vector_args(vlib_frame_t *f)
#define ip6_frag_hdr_more(hdr)
u16 current_length
Number of bytes between current data and the end of this buffer.
#define IP4_FRAG_NODE_NAME
always_inline void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
vlib_node_registration_t ip6_frag_node
(constructor) VLIB_REGISTER_NODE (ip6_frag_node)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
always_inline u16 ip4_header_checksum(ip4_header_t *i)
#define IP4_HEADER_FLAG_MORE_FRAGMENTS
#define vec_free(V)
Free vector's memory (no header).
void icmp4_error_set_vnet_buffer(vlib_buffer_t *b, u8 type, u8 code, u32 data)
#define clib_memcpy(a, b, c)
#define ip6_frag_hdr_offset(hdr)
#define IP_FRAG_FLAG_IP6_HEADER
#define IP_FRAG_FLAG_IP4_HEADER
u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
#define VLIB_BUFFER_IS_TRACED
vlib_node_registration_t ip4_frag_node
(constructor) VLIB_REGISTER_NODE (ip4_frag_node)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant).
always_inline void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
static char * ip4_frag_error_strings[]
always_inline void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define VLIB_REGISTER_NODE(x,...)
always_inline vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
static void ip4_frag_do_fragment(vlib_main_t *vm, u32 pi, u32 **buffer, ip_frag_error_t *error)
static uword ip6_frag(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
#define IP4_HEADER_FLAG_DONT_FRAGMENT
#define ip6_frag_hdr_offset_and_more(offset, more)
u32 flags
buffer flags: VLIB_BUFFER_IS_TRACED: trace this buffer.
always_inline vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
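To close, a hedged sketch of how a few of the helpers indexed above compose when inspecting an IPv4 packet; the function name is illustrative and assumes the VPP headers are available:

    #include <vlib/vlib.h>
    #include <vnet/ip/ip.h>

    /* Illustrative only: return the byte offset of a fragment, or 0 for an
       unfragmented packet (or a first fragment, whose offset is also 0).
       The buffer must hold a complete IPv4 header at its current data. */
    static u32
    demo_ip4_frag_byte_offset (vlib_main_t *vm, u32 buffer_index)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
      ip4_header_t *ip4 = vlib_buffer_get_current (b);
      if (!ip4_is_fragment (ip4))
        return 0;
      /* ip4_get_fragment_offset() counts 8-octet units; scale to bytes */
      return (u32) ip4_get_fragment_offset (ip4) * 8;
    }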