FD.io VPP  v21.01.1
Vector Packet Processing
input.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/udp/udp_packet.h>

#include <vmxnet3/vmxnet3.h>

#define foreach_vmxnet3_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_NO_SOP, "Rx packet error - no SOP") \
  _(RX_PACKET, "Rx packet error") \
  _(RX_PACKET_EOP, "Rx packet error found on EOP") \
  _(NO_BUFFER, "Rx no buffer error")

typedef enum
{
#define _(f,s) VMXNET3_INPUT_ERROR_##f,
  foreach_vmxnet3_input_error
#undef _
    VMXNET3_INPUT_N_ERROR,
} vmxnet3_input_error_t;

static __clib_unused char *vmxnet3_input_error_strings[] = {
#define _(n,s) s,
  foreach_vmxnet3_input_error
#undef _
};

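/*
 * Map a completion descriptor to the receive ring it belongs to.
 * The ring id is carried in bits 16-25 of the completion index; ids in
 * [num_rx_queues, 2 * num_rx_queues) refer to ring 1, everything else
 * to ring 0.
 */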
static_always_inline u16
vmxnet3_find_rid (vmxnet3_device_t * vd, vmxnet3_rx_comp * rx_comp)
{
  u32 rid;

  // rid is bits 16-25 of the completion index (10-bit number)
  rid = rx_comp->index & (0xffffffff >> 6);
  rid >>= 16;
  if ((rid >= vd->num_rx_queues) && (rid < (vd->num_rx_queues << 1)))
    return 1;
  else
    return 0;
}

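/*
 * Advance the completion ring's consumer index, flipping the expected
 * generation bit whenever the index wraps around the end of the ring.
 */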
static_always_inline void
vmxnet3_rx_comp_ring_advance_next (vmxnet3_rxq_t * rxq)
{
  vmxnet3_rx_comp_ring *comp_ring = &rxq->rx_comp_ring;

  comp_ring->next++;
  if (PREDICT_FALSE (comp_ring->next == rxq->size))
    {
      comp_ring->next = 0;
      comp_ring->gen ^= VMXNET3_RXCF_GEN;
    }
}

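/*
 * Translate the checksum/GSO hints found in the rx completion descriptor
 * into vlib buffer metadata: l2/l3/l4 header offsets, checksum offload
 * flags (unless checksum calculation was disabled for the packet), and
 * GSO size / l4 header size when the completion describes an LRO packet.
 */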
static_always_inline void
vmxnet3_handle_offload (vmxnet3_rx_comp * rx_comp, vlib_buffer_t * hb,
                        u16 gso_size)
{
  u8 l4_hdr_sz = 0;

  if (rx_comp->flags & VMXNET3_RXCF_IP4)
    {
      ip4_header_t *ip4 = (ip4_header_t *) (hb->data +
                                            sizeof (ethernet_header_t));

      vnet_buffer (hb)->l2_hdr_offset = 0;
      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
        ip4_header_bytes (ip4);
      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP4;

      /* checksum offload */
      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
        {
          if (!(rx_comp->flags & VMXNET3_RXCF_IPC))
            {
              hb->flags |= VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
              ip4->checksum = 0;
            }
          if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
            {
              if (rx_comp->flags & VMXNET3_RXCF_TCP)
                {
                  tcp_header_t *tcp =
                    (tcp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  hb->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
                  tcp->checksum = 0;
                }
              else if (rx_comp->flags & VMXNET3_RXCF_UDP)
                {
                  udp_header_t *udp =
                    (udp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  hb->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
                  udp->checksum = 0;
                }
            }
        }

      if (gso_size)
        {
          if (rx_comp->flags & VMXNET3_RXCF_TCP)
            {
              tcp_header_t *tcp =
                (tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = tcp_header_bytes (tcp);
            }
          else if (rx_comp->flags & VMXNET3_RXCF_UDP)
            {
              udp_header_t *udp =
                (udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = sizeof (*udp);
            }
          vnet_buffer2 (hb)->gso_size = gso_size;
          vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
          hb->flags |= VNET_BUFFER_F_GSO;
        }
    }
  else if (rx_comp->flags & VMXNET3_RXCF_IP6)
    {
      vnet_buffer (hb)->l2_hdr_offset = 0;
      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
        sizeof (ip6_header_t);
      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
        VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP6;

      /* checksum offload */
      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
        {
          if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
            {
              if (rx_comp->flags & VMXNET3_RXCF_TCP)
                {
                  tcp_header_t *tcp =
                    (tcp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  hb->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
                  tcp->checksum = 0;
                }
              else if (rx_comp->flags & VMXNET3_RXCF_UDP)
                {
                  udp_header_t *udp =
                    (udp_header_t *) (hb->data +
                                      vnet_buffer (hb)->l4_hdr_offset);
                  hb->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
                  udp->checksum = 0;
                }
            }
        }

      if (gso_size)
        {
          if (rx_comp->flags & VMXNET3_RXCF_TCP)
            {
              tcp_header_t *tcp =
                (tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = tcp_header_bytes (tcp);
            }
          else if (rx_comp->flags & VMXNET3_RXCF_UDP)
            {
              udp_header_t *udp =
                (udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
              l4_hdr_sz = sizeof (*udp);
            }
          vnet_buffer2 (hb)->gso_size = gso_size;
          vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
          hb->flags |= VNET_BUFFER_F_GSO;
        }
    }
}

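/*
 * Per-queue receive path: walk the rx completion ring while descriptors
 * with the expected generation bit are available, rebuild multi-segment
 * packets from the SOP/EOP markers, apply offload metadata, trace and
 * enqueue the resulting buffers, and finally refill both rx rings.
 */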
static_always_inline uword
vmxnet3_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame, vmxnet3_device_t * vd,
                             u16 qid)
{
  vnet_main_t *vnm = vnet_get_main ();
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  vmxnet3_rx_comp *rx_comp;
  u32 desc_idx;
  vmxnet3_rxq_t *rxq;
  u32 thread_index = vm->thread_index;
  u32 buffer_indices[VLIB_FRAME_SIZE], *bi;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  vmxnet3_rx_ring *ring;
  vmxnet3_rx_comp_ring *comp_ring;
  u16 rid;
  vlib_buffer_t *prev_b0 = 0, *hb = 0;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u8 known_next = 0, got_packet = 0;
  vmxnet3_rx_desc *rxd;
  clib_error_t *error;
  u16 gso_size = 0;

  rxq = vec_elt_at_index (vd->rxqs, qid);
  comp_ring = &rxq->rx_comp_ring;
  bi = buffer_indices;
  next = nexts;
  rx_comp = &rxq->rx_comp[comp_ring->next];

  while (PREDICT_TRUE ((n_rx_packets < VLIB_FRAME_SIZE) &&
                       (comp_ring->gen ==
                        (rx_comp->flags & VMXNET3_RXCF_GEN))))
    {
      vlib_buffer_t *b0;
      u32 bi0;

      rid = vmxnet3_find_rid (vd, rx_comp);
      ring = &rxq->rx_ring[rid];

      if (PREDICT_TRUE (ring->fill >= 1))
        ring->fill--;
      else
        {
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_NO_BUFFER, 1);
          if (hb)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          prev_b0 = 0;
          break;
        }

      desc_idx = rx_comp->index & VMXNET3_RXC_INDEX;
      ring->consume = desc_idx;
      rxd = &rxq->rx_desc[rid][desc_idx];

      bi0 = ring->bufs[desc_idx];
      ring->bufs[desc_idx] = ~0;

      b0 = vlib_get_buffer (vm, bi0);
      vnet_buffer (b0)->sw_if_index[VLIB_RX] = vd->sw_if_index;
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      vnet_buffer (b0)->feature_arc_index = 0;
      b0->current_length = rx_comp->len & VMXNET3_RXCL_LEN_MASK;
      b0->current_data = 0;
      b0->total_length_not_including_first_buffer = 0;
      b0->next_buffer = 0;
      b0->flags = 0;
      b0->error = 0;
      b0->current_config_index = 0;

      if (PREDICT_FALSE ((rx_comp->index & VMXNET3_RXCI_EOP) &&
                         (rx_comp->len & VMXNET3_RXCL_ERROR)))
        {
          vlib_buffer_free_one (vm, bi0);
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_RX_PACKET_EOP, 1);
          if (hb && vlib_get_buffer_index (vm, hb) != bi0)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          prev_b0 = 0;
          goto next;
        }

      if (rx_comp->index & VMXNET3_RXCI_SOP)
        {
          ASSERT (!(rxd->flags & VMXNET3_RXF_BTYPE));
          /* start segment */
          if (vd->gso_enable &&
              (rx_comp->flags & VMXNET3_RXCF_CT) == VMXNET3_RXCOMP_TYPE_LRO)
            {
              vmxnet3_rx_comp_ext *lro = (vmxnet3_rx_comp_ext *) rx_comp;

              gso_size = lro->flags & VMXNET3_RXECF_MSS_MASK;
            }

          hb = b0;
          bi[0] = bi0;
          if (!(rx_comp->index & VMXNET3_RXCI_EOP))
            {
              hb->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
              prev_b0 = b0;
            }
          else
            {
              /*
               * Both start and end of packet are set. It is a complete packet
               */
              prev_b0 = 0;
              got_packet = 1;
            }
        }
      else if (rx_comp->index & VMXNET3_RXCI_EOP)
        {
          /* end of segment */
          if (PREDICT_TRUE (prev_b0 != 0))
            {
              if (PREDICT_TRUE (b0->current_length != 0))
                {
                  prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
                  prev_b0->next_buffer = bi0;
                  hb->total_length_not_including_first_buffer +=
                    b0->current_length;
                }
              else
                {
                  vlib_buffer_free_one (vm, bi0);
                }
              prev_b0 = 0;
              got_packet = 1;
            }
          else
            {
              /* EOP without SOP, error */
              vlib_error_count (vm, node->node_index,
                                VMXNET3_INPUT_ERROR_RX_PACKET_NO_SOP, 1);
              vlib_buffer_free_one (vm, bi0);
              if (hb && vlib_get_buffer_index (vm, hb) != bi0)
                {
                  vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
                  hb = 0;
                }
              goto next;
            }
        }
      else if (prev_b0)		// !sop && !eop
        {
          /* mid chain */
          ASSERT (rxd->flags & VMXNET3_RXF_BTYPE);
          prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
          prev_b0->next_buffer = bi0;
          prev_b0 = b0;
          hb->total_length_not_including_first_buffer += b0->current_length;
        }
      else
        {
          vlib_error_count (vm, node->node_index,
                            VMXNET3_INPUT_ERROR_RX_PACKET, 1);
          vlib_buffer_free_one (vm, bi0);
          if (hb && vlib_get_buffer_index (vm, hb) != bi0)
            {
              vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
              hb = 0;
            }
          goto next;
        }

      n_rx_bytes += b0->current_length;

      if (got_packet)
        {
          if (PREDICT_FALSE (vd->per_interface_next_index != ~0))
            {
              next_index = vd->per_interface_next_index;
              known_next = 1;
            }

          if (PREDICT_FALSE
              (vnet_device_input_have_features (vd->sw_if_index)))
            {
              vnet_feature_start_device_input_x1 (vd->sw_if_index,
                                                  &next_index, hb);
              known_next = 1;
            }

          if (PREDICT_FALSE (known_next))
            next[0] = next_index;
          else
            {
              ethernet_header_t *e = (ethernet_header_t *) hb->data;

              next[0] = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
              if (!ethernet_frame_is_tagged (ntohs (e->type)))
                vmxnet3_handle_offload (rx_comp, hb, gso_size);
            }

          n_rx_packets++;
          next++;
          bi++;
          hb = 0;
          got_packet = 0;
          gso_size = 0;
        }

    next:
      vmxnet3_rx_comp_ring_advance_next (rxq);
      rx_comp = &rxq->rx_comp[comp_ring->next];
    }

  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;

      bi = buffer_indices;
      next = nexts;
      while (n_trace && n_left)
        {
          vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
          if (PREDICT_TRUE
              (vlib_trace_buffer
               (vm, node, next[0], b, /* follow_chain */ 0)))
            {
              vmxnet3_input_trace_t *tr =
                vlib_add_trace (vm, node, b, sizeof (*tr));
              tr->next_index = next[0];
              tr->hw_if_index = vd->hw_if_index;
              tr->buffer = *b;
              n_trace--;
            }
          n_left--;
          bi++;
          next++;
        }
      vlib_set_trace_count (vm, node, n_trace);
    }

  if (PREDICT_TRUE (n_rx_packets))
    {
      vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts,
                                   n_rx_packets);
      vlib_increment_combined_counter
        (vnm->interface_main.combined_sw_if_counters +
         VNET_INTERFACE_COUNTER_RX, thread_index,
         vd->sw_if_index, n_rx_packets, n_rx_bytes);
    }

  error = vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
  if (PREDICT_FALSE (error != 0))
    {
      vlib_error_count (vm, node->node_index,
                        VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
    }
  error = vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
  if (PREDICT_FALSE (error != 0))
    {
      vlib_error_count (vm, node->node_index,
                        VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
    }

  return n_rx_packets;
}

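/*
 * Input node function: poll every device/queue pair assigned to this
 * thread and collect packets from devices that are administratively up.
 */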
VLIB_NODE_FN (vmxnet3_input_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  u32 n_rx = 0;
  vmxnet3_main_t *vmxm = &vmxnet3_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
    {
      vmxnet3_device_t *vd;
      vd = vec_elt_at_index (vmxm->devices, dq->dev_instance);
      if ((vd->flags & VMXNET3_DEVICE_F_ADMIN_UP) == 0)
        continue;
      n_rx += vmxnet3_device_input_inline (vm, node, frame, vd, dq->queue_id);
    }
  return n_rx;
}

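/*
 * Node registration: the node is a sibling of "device-input", supports
 * packet tracing, and starts out disabled until a receive queue is
 * assigned to a worker thread.
 */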
#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vmxnet3_input_node) = {
  .name = "vmxnet3-input",
  .sibling_of = "device-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .format_trace = format_vmxnet3_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = VMXNET3_INPUT_N_ERROR,
  .error_strings = vmxnet3_input_error_strings,
};
#endif

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */