Excerpts from the Marvell PP2 (mrvl-pp2) input node source:

#include <sys/ioctl.h>

/* Per-node error counters for the mrvl-pp2 input node. */
#define foreach_mrvl_pp2_input_error \
  _(PPIO_RECV, "pp2_ppio_recv error") \
  _(BPOOL_GET_NUM_BUFFS, "pp2_bpool_get_num_buffs error") \
  _(BPOOL_PUT_BUFFS, "pp2_bpool_put_buffs error") \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(MAC_CE, "MAC error (CRC error)") \
  _(MAC_OR, "overrun error") \
  _(MAC_RSVD, "unknown MAC error") \
  _(MAC_RE, "resource error") \
  _(IP_HDR, "ip4 header error")

#define _(f,s) MRVL_PP2_INPUT_ERROR_##f,
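The _(f,s) helper above is the enum side of the usual VPP error-table pattern: the same foreach macro is expanded once into an enum of counter indices and once into the strings handed to the node registration. A minimal sketch of that conventional expansion (the string-table identifier is illustrative, not taken from this listing):

typedef enum
{
#define _(f,s) MRVL_PP2_INPUT_ERROR_##f,
  foreach_mrvl_pp2_input_error
#undef _
  MRVL_PP2_INPUT_N_ERROR,
} mrvl_pp2_input_error_t;

static char *mrvl_pp2_input_error_strings[] = {
#define _(n,s) s,
  foreach_mrvl_pp2_input_error
#undef _
};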
/* Per-descriptor RX buffer setup (excerpt): the packet length and the
 * L2/L3/L4 header offsets come from the hardware RX descriptor. */
len = pp2_ppio_inq_desc_get_pkt_len (d);
...
b->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | add_flags;

if (add_flags & VNET_BUFFER_F_L2_HDR_OFFSET_VALID)
  ...
if (add_flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID)
  ...
if (add_flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID)
  /* DM_RXD_GET_IPHDR_LEN() returns the IPv4 IHL in 32-bit words, so it is
   * multiplied by 4 to get the L4 header offset in bytes (IHL 5 -> 20). */
  ... DM_RXD_GET_IPHDR_LEN (d) * 4;
/* RX error accounting (excerpt): the descriptor error code and the L3
 * parse info select which per-node error counter the buffer is charged to. */
u8 ec = DM_RXD_GET_EC (d);
...
b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_CE];
...
b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_OR];
...
b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_RSVD];
...
b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_RE];
...
l3_info = DM_RXD_GET_L3_PRS_INFO (d);
...
b->error = node->errors[MRVL_PP2_INPUT_ERROR_IP_HDR];
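Only the counter assignments survive in this listing; the surrounding control flow does not. A plausible shape for it, with the case values purely illustrative (the real values come from the PP2 descriptor's error-code field, not from this sketch):

u8 ec = DM_RXD_GET_EC (d);
if (PREDICT_FALSE (ec != 0))
  {
    switch (ec)
      {
      case 1:			/* illustrative: CRC error */
	b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_CE];
	break;
      case 2:			/* illustrative: overrun */
	b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_OR];
	break;
      case 3:			/* illustrative: resource error */
	b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_RE];
	break;
      default:			/* anything else */
	b->error = node->errors[MRVL_PP2_INPUT_ERROR_MAC_RSVD];
	break;
      }
  }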
/* Buffer flag sets selected from the descriptor's L3 parse result
 * (excerpt): one set for IPv4, one for IPv6, and a minimal set for
 * everything else. */
... VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP4);
...
... VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP6);
...
... VNET_BUFFER_F_L2_HDR_OFFSET_VALID);
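Read together, the three flag sets cover three parse outcomes. A self-contained sketch of that selection follows; the helper name and its two boolean parameters are hypothetical stand-ins for the driver's actual tests on the descriptor's L3 parse info:

/* Sketch only: l3_is_ip4 / l3_is_ip6 stand in for tests on
 * DM_RXD_GET_L3_PRS_INFO (d). */
static_always_inline u32
mrvl_pp2_rx_buffer_flags (int l3_is_ip4, int l3_is_ip6)
{
  if (l3_is_ip4)
    return (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
	    VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
	    VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP4);
  if (l3_is_ip6)
    return (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
	    VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
	    VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP6);
  return VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
}

In the non-IP case only the L2 offset is marked valid, so downstream nodes parse the packet themselves.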
/* Input function locals and descriptor-to-buffer translation (excerpt). */
u32 n_rx_packets = 0;
...
struct pp2_ppio_desc *d;
...
n_rx_packets = n_desc;

/* The RX descriptor "cookie" is the vlib buffer index that was stored when
 * the buffer was handed to the hardware pool, so the buffers are recovered
 * by reading the cookie back out of each descriptor. */
for (i = 0; i < n_desc; i++)
  ptd->buffers[i] = pp2_ppio_inq_desc_get_cookie (&ptd->descs[i]);
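Between the declarations and the cookie loop, the node pulls a batch of descriptors from the PPIO input queue; that part is not preserved in this listing. A sketch of the step, assuming the musdk pp2_ppio_recv() call and treating the ppio handle and traffic-class argument as placeholders for the driver's actual per-interface state:

u16 n_desc = VLIB_FRAME_SIZE;

/* Receive up to a full frame of descriptors; n_desc is updated in place
 * with the number actually received. */
if (PREDICT_FALSE (pp2_ppio_recv (ppio /* assumed handle */, 0 /* tc */, qid,
				  ptd->descs, &n_desc)))
  {
    vlib_error_count (vm, node->node_index,
		      MRVL_PP2_INPUT_ERROR_PPIO_RECV, 1);
    n_desc = 0;
  }
n_rx_packets = n_desc;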
/* Standard VPP dual/single enqueue loops (excerpt): process two packets per
 * iteration while descriptors and next-frame slots allow, then drain the
 * remainder one at a time. */
while (n_desc >= 4 && n_left_to_next >= 2)
  {
    ...
    ... sizeof (sw_if_index));
    ... sizeof (sw_if_index));
    ...
    vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
				     n_left_to_next, bi0, bi1, next0, next1);
    ...
  }

while (n_desc && n_left_to_next)
  {
    u32 bi0 = buffers[0];
    ...
    ... sizeof (sw_if_index));
    ...
    vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
				     n_left_to_next, bi0, next0);
    ...
  }
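For readers unfamiliar with this VPP convention, the single-buffer half of the pattern usually looks like the sketch below; it is the generic device-input shape, not a verbatim copy of this node (next-node selection, tracing and byte accounting are omitted):

u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
u32 *to_next, n_left_to_next;

vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

while (n_desc && n_left_to_next)
  {
    u32 bi0 = buffers[0];
    u32 next0 = next_index;
    vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);

    /* Let enabled device-input features override the next node. */
    vnet_feature_start_device_input_x1 (sw_if_index, &next0, b0);

    /* Copy the buffer index into the next frame and account for it. */
    to_next[0] = bi0;
    to_next += 1;
    n_left_to_next -= 1;

    /* Move the buffer to a different frame if next0 != next_index. */
    vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
				     n_left_to_next, bi0, next0);

    buffers += 1;
    n_desc -= 1;
  }

vlib_put_next_frame (vm, node, next_index, n_left_to_next);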
/* RX counter update and hardware buffer-pool refill (excerpt). */
... interface_main.combined_sw_if_counters + ...
...
... MRVL_PP2_INPUT_ERROR_BPOOL_GET_NUM_BUFFS, 1);
...
n_bufs = inq->size - n_bufs;
...
struct buff_release_entry *e = ptd->bre;
...
... MRVL_PP2_INPUT_ERROR_BUFFER_ALLOC, 1);
...
e->bpool = inq->bpool;
...
... MRVL_PP2_INPUT_ERROR_BPOOL_PUT_BUFFS, 1);
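These fragments come from the refill path, which follows the musdk buffer-release convention: query how many buffers the bpool still holds, allocate enough vlib buffers to top it back up to the queue size, record each buffer's physical address and buffer index (the cookie) in a buff_release_entry, and hand the batch back with pp2_bpool_put_buffs(). A condensed sketch under those assumptions; the hif handle and the buff.addr/buff.cookie field names follow musdk's struct pp2_buff_inf and are assumptions, not taken from this listing:

u32 n_bufs, n_alloc, i;
u16 n_rel;
struct buff_release_entry *e = ptd->bre;

if (PREDICT_FALSE (pp2_bpool_get_num_buffs (inq->bpool, &n_bufs)))
  {
    vlib_error_count (vm, node->node_index,
		      MRVL_PP2_INPUT_ERROR_BPOOL_GET_NUM_BUFFS, 1);
    return n_rx_packets;
  }

/* How many buffers the pool is short of its configured queue size. */
n_bufs = inq->size - n_bufs;
n_alloc = vlib_buffer_alloc (vm, ptd->buffers,
			     clib_min (n_bufs, MRVL_PP2_BUFF_BATCH_SZ));
if (PREDICT_FALSE (n_alloc == 0))
  {
    vlib_error_count (vm, node->node_index,
		      MRVL_PP2_INPUT_ERROR_BUFFER_ALLOC, 1);
    return n_rx_packets;
  }

for (i = 0; i < n_alloc; i++, e++)
  {
    vlib_buffer_t *b = vlib_get_buffer (vm, ptd->buffers[i]);
    e->buff.addr = vlib_buffer_get_pa (vm, b);	/* physical address for HW */
    e->buff.cookie = ptd->buffers[i];		/* buffer index returned on RX */
    e->bpool = inq->bpool;
  }

n_rel = n_alloc;
if (PREDICT_FALSE (pp2_bpool_put_buffs (hif /* assumed handle */, ptd->bre, &n_rel)))
  vlib_error_count (vm, node->node_index,
		    MRVL_PP2_INPUT_ERROR_BPOOL_PUT_BUFFS, 1);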
/* Node registration (excerpt). */
.name = "mrvl-pp2-input",
.sibling_of = "device-input",
...
.state = VLIB_NODE_STATE_POLLING,
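These initializers belong to the node's VLIB_REGISTER_NODE block. A typical registration for a polling device-input node of this kind is sketched below; the registration symbol and the fields not present in the fragments above (trace formatter, flags, error table) follow the usual VPP conventions and the symbols listed later on this page, and are not verbatim from this listing:

/* The node function itself is usually supplied via VLIB_NODE_FN
 * (mrvl_pp2_input_node) in current VPP, so no .function field is shown. */
VLIB_REGISTER_NODE (mrvl_pp2_input_node) = {
  .name = "mrvl-pp2-input",
  .sibling_of = "device-input",
  .format_trace = format_mrvl_pp2_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_POLLING,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .n_errors = MRVL_PP2_INPUT_N_ERROR,
  .error_strings = mrvl_pp2_input_error_strings,
};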
Referenced definitions and documentation excerpts:

u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index; VLIB_BUFFER_IS_TRACED: trace this buffer.
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
u8 runtime_data[0]
Function dependent node-runtime data.
struct buff_release_entry bre[MRVL_PP2_BUFF_BATCH_SZ]
format_function_t format_mrvl_pp2_input_trace
vnet_main_t * vnet_get_main(void)
u32 buffers[VLIB_FRAME_SIZE]
static uword vlib_buffer_get_pa(vlib_main_t *vm, vlib_buffer_t *b)
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
struct pp2_ppio_desc * descs
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
#define clib_memcpy_fast(a, b, c)
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
u16 current_length
Nbytes between current data and the end of this buffer.
u32 per_interface_next_index
mrvl_pp2_main_t mrvl_pp2_main
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
vlib_error_t * errors
Vector of errors for this node.
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
#define static_always_inline
vl_api_interface_index_t sw_if_index
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define MRVL_PP2_BUFF_BATCH_SZ
vlib_error_t error
Error code for buffers to be enqueued to error handler.
u32 node_index
Node index.
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
static_always_inline void vnet_feature_start_device_input_x2(u32 sw_if_index, u32 *next0, u32 *next1, vlib_buffer_t *b0, vlib_buffer_t *b1)
#define VLIB_REGISTER_NODE(x,...)
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
vlib_main_t * vm, vlib_node_runtime_t * node
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define foreach_device_and_queue(var, vec)
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
VLIB buffer representation.
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame
mrvl_pp2_if_t * interfaces
mrvl_pp2_per_thread_data_t * per_thread_data
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
#define CLIB_CACHE_LINE_BYTES
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
#define MRVL_PP2_IF_F_ADMIN_UP
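As a closing illustration of how several of the symbols listed above fit together on the RX path, here is a minimal sketch (not code from this file; the helper name is hypothetical) that translates buffer indices into buffer pointers and bumps the combined RX counter for the receiving interface:

static_always_inline void
mrvl_pp2_count_rx (vlib_main_t * vm, u32 sw_if_index, u32 thread_index,
		   u32 * buffers, u32 n_buffers)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 n_bytes = 0, i;

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, buffers[i]);
      n_bytes += b->current_length;
    }

  /* Per-interface combined (packets + bytes) RX counter. */
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX,
				   thread_index, sw_if_index,
				   n_buffers, n_bytes);
}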