#define foreach_vmxnet3_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_NO_SOP, "Rx packet error - no SOP") \
  _(RX_PACKET, "Rx packet error") \
  _(RX_PACKET_EOP, "Rx packet error found on EOP") \
  _(NO_BUFFER, "Rx no buffer error")

#define _(f,s) VMXNET3_INPUT_ERROR_##f,

rid = rx_comp->index & (0xffffffff >> 6);
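The #define _(f,s) VMXNET3_INPUT_ERROR_##f, line only makes sense together with the usual VPP X-macro expansion. A minimal sketch of how the error enum and the matching string table are typically generated from foreach_vmxnet3_input_error; the enum terminator and array name follow the common VPP convention and are assumed rather than copied from the file:

typedef enum
{
#define _(f,s) VMXNET3_INPUT_ERROR_##f,
  foreach_vmxnet3_input_error
#undef _
  VMXNET3_INPUT_N_ERROR,
} vmxnet3_input_error_t;

/* parallel table of human-readable strings, indexed by the enum above */
static char *vmxnet3_input_error_strings[] = {
#define _(f,s) s,
  foreach_vmxnet3_input_error
#undef _
};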
hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
  VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
  VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP4;

hb->flags |= VNET_BUFFER_F_OFFLOAD_IP_CKSUM;

hb->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;

hb->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;

l4_hdr_sz = sizeof (*udp);

hb->flags |= VNET_BUFFER_F_GSO;

hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
  VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
  VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP6;

hb->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;

hb->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;

l4_hdr_sz = sizeof (*udp);

hb->flags |= VNET_BUFFER_F_GSO;
u32 n_rx_packets = 0, n_rx_bytes = 0;
vmxnet3_rx_comp *rx_comp;

u8 known_next = 0, got_packet = 0;
vmxnet3_rx_desc *rxd;

VMXNET3_INPUT_ERROR_NO_BUFFER, 1);

rxd = &rxq->rx_desc[rid][desc_idx];

bi0 = ring->bufs[desc_idx];
ring->bufs[desc_idx] = ~0;
VMXNET3_INPUT_ERROR_RX_PACKET_EOP, 1);

vmxnet3_rx_comp_ext *lro = (vmxnet3_rx_comp_ext *) rx_comp;

hb->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;

prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;

hb->total_length_not_including_first_buffer +=

VMXNET3_INPUT_ERROR_RX_PACKET_NO_SOP, 1);

prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
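The two VLIB_BUFFER_NEXT_PRESENT fragments and the truncated total_length_not_including_first_buffer update are the standard VPP way of chaining a multi-descriptor packet. A sketch of the chaining step, assuming hb is the head (SOP) buffer, prev_b0 the previously appended buffer, and bi0/b0 the buffer just taken from the ring:

/* link the new buffer onto the tail of the chain */
prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
prev_b0->next_buffer = bi0;

/* only the head buffer carries the chain-wide byte count */
hb->total_length_not_including_first_buffer += b0->current_length;

prev_b0 = b0;   /* b0 becomes the new tail */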
VMXNET3_INPUT_ERROR_RX_PACKET, 1);

next[0] = next_index;
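next[0] = next_index records the per-packet next node; once the poll loop has filled the buffer-index and next-index arrays, they are handed off in a single call. A sketch of that hand-off, with buffer_indices and nexts standing in for whatever array names the loop actually uses:

/* hand the received packets to their next nodes in one call */
vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_rx_packets);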
u32 n_left = n_rx_packets;

while (n_trace && n_left)
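The while (n_trace && n_left) fragment is the usual post-processing pass that fills packet traces when tracing is enabled. A sketch of that loop using the trace helpers listed further down; the trace record type vmxnet3_input_trace_t and its next_index field, plus the bi/next arrays, are assumptions for illustration:

u32 n_trace = vlib_get_trace_count (vm, node);
if (PREDICT_FALSE (n_trace))
  {
    u32 n_left = n_rx_packets;
    u32 *bi = buffer_indices;          /* assumed: filled by the rx loop */
    u16 *next = nexts;
    while (n_trace && n_left)
      {
        vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
        vlib_trace_buffer (vm, node, next[0], b, /* follow_chain */ 0);
        vmxnet3_input_trace_t *tr =    /* assumed trace record type */
          vlib_add_trace (vm, node, b, sizeof (*tr));
        tr->next_index = next[0];
        n_trace--;
        n_left--;
        bi++;
        next++;
      }
    vlib_set_trace_count (vm, node, n_trace);
  }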
VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);

VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
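Both BUFFER_ALLOC fragments are the tail of the same pattern: if one of the ring-refill helpers cannot allocate buffers, the failure is recorded against the node's error counters rather than aborting the poll. A sketch under that assumption, with error as a clib_error_t * local:

clib_error_t *error;

error = vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
if (PREDICT_FALSE (error != 0))
  vlib_error_count (vm, node->node_index,
                    VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);

error = vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
if (PREDICT_FALSE (error != 0))
  vlib_error_count (vm, node->node_index,
                    VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);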
if ((vd->flags & VMXNET3_DEVICE_F_ADMIN_UP) == 0)
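The admin-up test sits in the outer node function, which normally walks the device/queue assignments for this thread and skips devices that are not up. A sketch of that dispatch loop; the per-device worker name vmxnet3_device_input_inline is an assumption, the rest follows the common VPP device-input pattern:

uword n_rx = 0;
vmxnet3_main_t *vmxm = &vmxnet3_main;
vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
vnet_device_and_queue_t *dq;

foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    vmxnet3_device_t *vd;
    vd = vec_elt_at_index (vmxm->devices, dq->dev_instance);
    if ((vd->flags & VMXNET3_DEVICE_F_ADMIN_UP) == 0)
      continue;
    n_rx += vmxnet3_device_input_inline (vm, node, frame, vd, dq->queue_id);
  }
return n_rx;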
#ifndef CLIB_MARCH_VARIANT

.name = "vmxnet3-input",
.sibling_of = "device-input",
.state = VLIB_NODE_STATE_DISABLED,
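The .name, .sibling_of and .state fragments come from the node registration, which is compiled only once (hence the CLIB_MARCH_VARIANT guard). A sketch of what such a registration typically looks like; the exact field set and the error-string wiring are assumed, following the VLIB_REGISTER_NODE convention and the symbols listed below:

#ifndef CLIB_MARCH_VARIANT
VLIB_REGISTER_NODE (vmxnet3_input_node) = {
  .name = "vmxnet3-input",
  .sibling_of = "device-input",
  .format_trace = format_vmxnet3_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .n_errors = VMXNET3_INPUT_N_ERROR,
  .error_strings = vmxnet3_input_error_strings,
};
#endif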
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static_always_inline clib_error_t * vmxnet3_rxq_refill_ring0(vlib_main_t *vm, vmxnet3_device_t *vd, vmxnet3_rxq_t *rxq)
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
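vlib_increment_combined_counter is how the rx path credits packets and bytes to the interface's combined (packets + bytes) rx counter at the end of a poll. A sketch of the usual call, assuming thread_index, the device's sw_if_index and the per-poll totals are in scope:

vnet_main_t *vnm = vnet_get_main ();

vlib_increment_combined_counter
  (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
   thread_index, vd->sw_if_index, n_rx_packets, n_rx_bytes);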
vnet_main_t * vnet_get_main(void)
vmxnet3_rx_desc * rx_desc[VMXNET3_RX_RING_SIZE]
vnet_interface_main_t interface_main
i16 current_data
Signed offset in data[], pre_data[] that we are currently processing.
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
u16 current_length
Nbytes between current data and the end of this buffer.
vmxnet3_main_t vmxnet3_main
#define VLIB_NODE_FN(node)
struct _tcp_header tcp_header_t
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
#define static_always_inline
vlib_combined_counter_main_t * combined_sw_if_counters
static_always_inline int vnet_device_input_have_features(u32 sw_if_index)
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define VMXNET3_RXC_INDEX
#define VMXNET3_RXCL_LEN_MASK
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
vlib_error_t error
Error code for buffers to be enqueued to error handler.
#define VMXNET3_RXECF_MSS_MASK
vmxnet3_rx_comp_ring rx_comp_ring
u32 node_index
Node index.
#define VLIB_REGISTER_NODE(x,...)
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
u32 per_interface_next_index
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
static_always_inline int ethernet_frame_is_tagged(u16 type)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define foreach_device_and_queue(var, vec)
#define VMXNET3_RXCL_ERROR
u32 next_buffer
Next buffer for this linked-list of buffers.
VLIB buffer representation.
#define VMXNET3_RXF_BTYPE
vmxnet3_rx_comp * rx_comp
vmxnet3_rx_ring rx_ring[VMXNET3_RX_RING_SIZE]
format_function_t format_vmxnet3_input_trace
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
static int tcp_header_bytes(tcp_header_t *t)
vmxnet3_device_t * devices
static_always_inline clib_error_t * vmxnet3_rxq_refill_ring1(vlib_main_t *vm, vmxnet3_device_t *vd, vmxnet3_rxq_t *rxq)
static void vlib_buffer_free_one(vlib_main_t *vm, u32 buffer_index)
Free one buffer. Shorthand to free a single buffer chain.
static int ip4_header_bytes(const ip4_header_t *i)
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
#define VMXNET3_RXCOMP_TYPE_LRO
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
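These last two helpers translate between the 32-bit buffer indices that travel through frames and the vlib_buffer_t pointers the node actually touches. A short round-trip sketch, assuming bi0 is a valid buffer index:

vlib_buffer_t *b = vlib_get_buffer (vm, bi0);   /* index -> pointer */
u32 bi = vlib_get_buffer_index (vm, b);         /* pointer -> index */
ASSERT (bi == bi0);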