FD.io VPP  v18.10-34-gcce845e
Vector Packet Processing
device.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2018 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vlib/unix/unix.h>
20 #include <vlib/pci/pci.h>
21 #include <vnet/ethernet/ethernet.h>
22 
23 #include <avf/avf.h>
24 
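/* Sizing constants used below: AVF_MBOX_LEN / AVF_MBOX_BUF_SZ size the admin
 * (mailbox) queues, AVF_RXQ_SZ / AVF_TXQ_SZ are the default descriptor ring
 * sizes, and AVF_ITR_INT is the interrupt throttle interval programmed into
 * the DYN_CTL registers (in 2 usec steps, see the irq enable helpers). */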
25 #define AVF_MBOX_LEN 64
26 #define AVF_MBOX_BUF_SZ 512
27 #define AVF_RXQ_SZ 512
28 #define AVF_TXQ_SZ 512
29 #define AVF_ITR_INT 8160
30 
31 #define PCI_VENDOR_ID_INTEL 0x8086
32 #define PCI_DEVICE_ID_INTEL_AVF 0x1889
33 #define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
34 #define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
35 
36 avf_main_t avf_main;
37 
38 static pci_device_id_t avf_pci_device_ids[] = {
39  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_AVF},
40  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X710_VF},
41  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X722_VF},
42  {0},
43 };
44 
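/* Mask the admin-queue (misc) interrupt: clear the ICR0 enables and program
 * AVFINT_DYN_CTL0 with "no ITR update". */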
45 static inline void
46 avf_irq_0_disable (avf_device_t * ad)
47 {
48  u32 dyn_ctl0 = 0, icr0_ena = 0;
49 
50  dyn_ctl0 |= (3 << 3); /* 11b = No ITR update */
51 
52  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
53  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
54  avf_reg_flush (ad);
55 }
56 
57 static inline void
58 avf_irq_0_enable (avf_device_t * ad)
59 {
60  u32 dyn_ctl0 = 0, icr0_ena = 0;
61 
62  icr0_ena |= (1 << 30); /* [30] Admin Queue Enable */
63 
64  dyn_ctl0 |= (1 << 0); /* [0] Interrupt Enable */
65  dyn_ctl0 |= (1 << 1); /* [1] Clear PBA */
66  //dyn_ctl0 |= (3 << 3); /* [4:3] ITR Index, 11b = No ITR update */
67  dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
68 
69  avf_irq_0_disable (ad);
70  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
71  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
72  avf_reg_flush (ad);
73 }
74 
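/* Per-queue interrupt control: AVFINT_DYN_CTLN(line) enables or disables the
 * MSI-X line used by an RX queue and sets its throttling interval. */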
75 static inline void
76 avf_irq_n_disable (avf_device_t * ad, u8 line)
77 {
78  u32 dyn_ctln = 0;
79 
80  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
81  avf_reg_flush (ad);
82 }
83 
84 static inline void
85 avf_irq_n_enable (avf_device_t * ad, u8 line)
86 {
87  u32 dyn_ctln = 0;
88 
89  dyn_ctln |= (1 << 0); /* [0] Interrupt Enable */
90  dyn_ctln |= (1 << 1); /* [1] Clear PBA */
91  dyn_ctln |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
92 
93  avf_irq_n_disable (ad, line);
94  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
95  avf_reg_flush (ad);
96 }
97 
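/* Post one descriptor on the admin transmit queue (ATQ), optionally attaching
 * an indirect buffer, bump the tail register and poll (with short suspends)
 * until the device marks the descriptor done. */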
98 
99 clib_error_t *
100 avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
101 		 void *data, int len)
102 {
103  avf_main_t *am = &avf_main;
104  clib_error_t *err = 0;
105  avf_aq_desc_t *d, dc;
106  int n_retry = 5;
107 
108  d = &ad->atq[ad->atq_next_slot];
109  clib_memcpy (d, dt, sizeof (avf_aq_desc_t));
110  d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
111  if (len)
112  d->datalen = len;
113  if (len)
114  {
115  u64 pa;
116  pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
117  d->addr_hi = (u32) (pa >> 32);
118  d->addr_lo = (u32) pa;
119  clib_memcpy (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ, data,
120 	       len);
121  d->flags |= AVF_AQ_F_BUF;
122  }
123 
124  if (ad->flags & AVF_DEVICE_F_ELOG)
125  clib_memcpy (&dc, d, sizeof (avf_aq_desc_t));
126 
127  CLIB_MEMORY_BARRIER ();
128  vlib_log_debug (am->log_class, "%U", format_hexdump, data, len);
129  ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
130  avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);
131  avf_reg_flush (ad);
132 
133 retry:
134  vlib_process_suspend (vm, 10e-6);
135 
136  if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
137  {
138  if (--n_retry == 0)
139  {
140  err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
141  d->opcode);
142  goto done;
143  }
144  goto retry;
145  }
146 
147  clib_memcpy (dt, d, sizeof (avf_aq_desc_t));
148  if (d->flags & AVF_AQ_F_ERR)
149  return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
150  "%d]", d->opcode, d->retval);
151 
152 done:
153  if (ad->flags & AVF_DEVICE_F_ELOG)
154  {
155  /* *INDENT-OFF* */
156  ELOG_TYPE_DECLARE (el) =
157  {
158  .format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
159  "datalen %d retval %d",
160  .format_args = "i4i2i2i2i2i2",
161  };
162  struct
163  {
164  u32 dev_instance;
165  u16 s_flags;
166  u16 r_flags;
167  u16 opcode;
168  u16 datalen;
169  u16 retval;
170  } *ed;
171  ed = ELOG_DATA (&vm->elog_main, el);
172  ed->dev_instance = ad->dev_instance;
173  ed->s_flags = dc.flags;
174  ed->r_flags = d->flags;
175  ed->opcode = dc.opcode;
176  ed->datalen = dc.datalen;
177  ed->retval = d->retval;
178  /* *INDENT-ON* */
179  }
180 
181  return err;
182 }
183 
184 clib_error_t *
185 avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
186 			  u32 val)
187 {
188  clib_error_t *err;
189  avf_aq_desc_t d = {.opcode = 0x207,.param1 = reg,.param3 = val };
190  err = avf_aq_desc_enq (vm, ad, &d, 0, 0);
191 
192  if (ad->flags & AVF_DEVICE_F_ELOG)
193  {
194  /* *INDENT-OFF* */
195  ELOG_TYPE_DECLARE (el) =
196  {
197  .format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
198  .format_args = "i4i4i4",
199  };
200  struct
201  {
202  u32 dev_instance;
203  u32 reg;
204  u32 val;
205  } *ed;
206  ed = ELOG_DATA (&vm->elog_main, el);
207  ed->dev_instance = ad->dev_instance;
208  ed->reg = reg;
209  ed->val = val;
210  /* *INDENT-ON* */
211  }
212  return err;
213 }
214 
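/* Allocate the RX descriptor ring for queue 'qid', pre-allocate buffers and
 * write their DMA addresses into the descriptors. */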
215 clib_error_t *
216 avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
217 {
218  avf_main_t *am = &avf_main;
219  avf_rxq_t *rxq;
220  clib_error_t *error = 0;
221  u32 n_alloc, i;
222 
223  vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
224  rxq = vec_elt_at_index (ad->rxqs, qid);
225  rxq->size = rxq_size;
226  rxq->next = 0;
227  rxq->descs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
228  rxq->size * sizeof (avf_rx_desc_t),
229 					   2 * CLIB_CACHE_LINE_BYTES);
230  memset ((void *) rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
231  vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
232  rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
233 
234  n_alloc = vlib_buffer_alloc (vm, rxq->bufs, rxq->size - 8);
235 
236  if (n_alloc == 0)
237  return clib_error_return (0, "buffer allocation error");
238 
239  rxq->n_enqueued = n_alloc;
240  avf_rx_desc_t *d = rxq->descs;
241  for (i = 0; i < n_alloc; i++)
242  {
243  if (ad->flags & AVF_DEVICE_F_IOVA)
244  {
245  vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
246  d->qword[0] = pointer_to_uword (b->data);
247  }
248  else
249  d->qword[0] =
250 	  vlib_get_buffer_data_physical_address (vm, rxq->bufs[i]);
251  d++;
252  }
253 
254  ad->n_rx_queues = clib_min (ad->num_queue_pairs, qid + 1);
255  return 0;
256 }
257 
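/* Allocate the TX descriptor ring for queue 'qid'; when more TX queues are
 * requested than the VF has queue pairs, threads share an existing queue
 * protected by a spinlock. */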
258 clib_error_t *
259 avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
260 {
261  avf_main_t *am = &avf_main;
262  avf_txq_t *txq;
263  clib_error_t *error = 0;
264 
265  if (qid >= ad->num_queue_pairs)
266  {
267  qid = qid % ad->num_queue_pairs;
268  txq = vec_elt_at_index (ad->txqs, qid);
269  if (txq->lock == 0)
270  clib_spinlock_init (&txq->lock);
271  ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
272  return 0;
273  }
274 
275  vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
276  txq = vec_elt_at_index (ad->txqs, qid);
277  txq->size = txq_size;
278  txq->next = 0;
279  txq->descs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
280  txq->size * sizeof (avf_tx_desc_t),
281 					   2 * CLIB_CACHE_LINE_BYTES);
282  vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
283  txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);
284 
285  ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);
286  return 0;
287 }
288 
289 typedef struct
290 {
294 
295 void
296 avf_arq_slot_init (avf_device_t * ad, u16 slot)
297 {
298  avf_aq_desc_t *d;
299  u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
300  d = &ad->arq[slot];
301  memset (d, 0, sizeof (avf_aq_desc_t));
302  d->flags = AVF_AQ_F_BUF;
303  d->datalen = AVF_MBOX_BUF_SZ;
304  d->addr_hi = (u32) (pa >> 32);
305  d->addr_lo = (u32) pa;
306 }
307 
308 static inline uword
309 avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
310 {
311  avf_main_t *am = &avf_main;
312  return (ad->flags & AVF_DEVICE_F_IOVA) ?
313  pointer_to_uword (p) :
314  vlib_physmem_virtual_to_physical (vm, am->physmem_region, p);
315 }
316 
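/* Program ATQ/ARQ base addresses, lengths and head/tail pointers and hand all
 * receive mailbox slots back to the hardware. */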
317 static void
318 avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
319 {
320  u64 pa;
321  int i;
322 
323  /* VF MailBox Transmit */
324  memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
325  ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);
326 
327  pa = avf_dma_addr (vm, ad, ad->atq);
328  avf_reg_write (ad, AVF_ATQT, 0); /* Tail */
329  avf_reg_write (ad, AVF_ATQH, 0); /* Head */
330  avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
331  avf_reg_write (ad, AVF_ATQBAL, (u32) pa); /* Base Address Low */
332  avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32)); /* Base Address High */
333 
334  /* VF MailBox Receive */
335  memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
336  ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);
337 
338  for (i = 0; i < AVF_MBOX_LEN; i++)
339  avf_arq_slot_init (ad, i);
340 
341  pa = avf_dma_addr (vm, ad, ad->arq);
342 
343  avf_reg_write (ad, AVF_ARQH, 0); /* Head */
344  avf_reg_write (ad, AVF_ARQT, 0);	/* Tail */
345  avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
346  avf_reg_write (ad, AVF_ARQBAL, (u32) pa); /* Base Address Low */
347  avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32)); /* Base Address High */
348  avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1); /* Tail */
349 
350  ad->atq_next_slot = 0;
351  ad->arq_next_slot = 0;
352 }
353 
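/* Send a virtchnl request to the PF over the admin queue and poll the ARQ for
 * the matching reply; PF events (e.g. link change) arriving in the meantime
 * are stashed on ad->events for the process node to handle. */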
354 clib_error_t *
355 avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
356 		void *in, int in_len, void *out, int out_len)
357 {
358  clib_error_t *err;
359  avf_aq_desc_t *d, dt = {.opcode = 0x801,.v_opcode = op };
360  u32 head;
361  int n_retry = 5;
362 
363 
364  /* suppress the interrupt in the next adminq receive slot,
365  as we are going to wait for the response;
366  we only need interrupts when an event is received */
367  d = &ad->arq[ad->arq_next_slot];
368  d->flags |= AVF_AQ_F_SI;
369 
370  if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
371  return err;
372 
373 retry:
374  head = avf_get_u32 (ad->bar0, AVF_ARQH);
375 
376  if (ad->arq_next_slot == head)
377  {
378  if (--n_retry == 0)
379  return clib_error_return (0, "timeout");
380  vlib_process_suspend (vm, 10e-3);
381  goto retry;
382  }
383 
384  d = &ad->arq[ad->arq_next_slot];
385 
386  if (d->v_opcode == VIRTCHNL_OP_EVENT)
387  {
388  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
389  virtchnl_pf_event_t *e;
390 
391  if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
392  ((d->flags & AVF_AQ_F_BUF) == 0))
393  return clib_error_return (0, "event message error");
394 
395  vec_add2 (ad->events, e, 1);
396  clib_memcpy (e, buf, sizeof (virtchnl_pf_event_t));
397  avf_arq_slot_init (ad, ad->arq_next_slot);
398  ad->arq_next_slot++;
399  n_retry = 5;
400  goto retry;
401  }
402 
403  if (d->v_opcode != op)
404  {
405  err =
406 	clib_error_return (0,
407 			   "unexpected message received [v_opcode = %u, "
408  "expected %u, v_retval %d]", d->v_opcode, op,
409  d->v_retval);
410  goto done;
411  }
412 
413  if (d->v_retval)
414  {
415  err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
416  d->v_opcode, d->v_retval);
417  goto done;
418  }
419 
420  if (d->flags & AVF_AQ_F_BUF)
421  {
422  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
423  clib_memcpy (out, buf, out_len);
424  }
425 
426  avf_arq_slot_init (ad, ad->arq_next_slot);
427  avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);
428  avf_reg_flush (ad);
429  ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;
430 
431 done:
432 
433  if (ad->flags & AVF_DEVICE_F_ELOG)
434  {
435  /* *INDENT-OFF* */
436  ELOG_TYPE_DECLARE (el) =
437  {
438  .format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
439  .format_args = "i4t4i4i4",
440  .n_enum_strings = VIRTCHNL_N_OPS,
441  .enum_strings = {
442 #define _(v, n) [v] = #n,
443 	foreach_virtchnl_op
444 #undef _
445  },
446  };
447  struct
448  {
449  u32 dev_instance;
450  u32 v_opcode;
451  u32 v_opcode_val;
452  u32 v_retval;
453  } *ed;
454  ed = ELOG_DATA (&vm->elog_main, el);
455  ed->dev_instance = ad->dev_instance;
456  ed->v_opcode = op;
457  ed->v_opcode_val = op;
458  ed->v_retval = d->v_retval;
459  /* *INDENT-ON* */
460  }
461  return err;
462 }
463 
464 clib_error_t *
465 avf_op_version (vlib_main_t * vm, avf_device_t * ad,
466 		virtchnl_version_info_t * ver)
467 {
468  clib_error_t *err = 0;
469  virtchnl_version_info_t myver = {
470  .major = VIRTCHNL_VERSION_MAJOR,
471  .minor = VIRTCHNL_VERSION_MINOR,
472  };
473 
474  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
475  sizeof (virtchnl_version_info_t), ver,
476  sizeof (virtchnl_version_info_t));
477 
478  if (err)
479  return err;
480 
481  return err;
482 }
483 
484 clib_error_t *
485 avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
486 			 virtchnl_vf_resource_t * res)
487 {
488  u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
489  VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
490  VIRTCHNL_VF_OFFLOAD_RX_POLLING);
491 
492  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
493  sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
494 }
495 
496 clib_error_t *
497 avf_op_config_rss_lut (vlib_main_t * vm, avf_device_t * ad)
498 {
499  int msg_len = sizeof (virtchnl_rss_lut_t) + ad->rss_lut_size - 1;
500  int i;
501  u8 msg[msg_len];
502  virtchnl_rss_lut_t *rl;
503 
504  memset (msg, 0, msg_len);
505  rl = (virtchnl_rss_lut_t *) msg;
506  rl->vsi_id = ad->vsi_id;
507  rl->lut_entries = ad->rss_lut_size;
508  for (i = 0; i < ad->rss_lut_size; i++)
509  rl->lut[i] = i % ad->n_rx_queues;
510 
511  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,
512  0);
513 }
514 
515 clib_error_t *
516 avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
517 {
518  int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
519  int i;
520  u8 msg[msg_len];
521  virtchnl_rss_key_t *rk;
522 
523  memset (msg, 0, msg_len);
524  rk = (virtchnl_rss_key_t *) msg;
525  rk->vsi_id = ad->vsi_id;
526  rk->key_len = ad->rss_key_size;
527  u32 seed = random_default_seed ();
528  for (i = 0; i < ad->rss_key_size; i++)
529  rk->key[i] = (u8) random_uword (&seed);
530 
531  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,
532  0);
533 }
534 
535 clib_error_t *
536 avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
537 {
538  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
539  0);
540 }
541 
542 clib_error_t *
543 avf_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad)
544 {
545  virtchnl_promisc_info_t pi = { 0 };
546 
547  pi.vsi_id = ad->vsi_id;
548  pi.flags = 1;
549  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
550  sizeof (virtchnl_promisc_info_t), 0, 0);
551 }
552 
553 
554 clib_error_t *
555 avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
556 {
557  int i;
558  int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
559  int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
560  sizeof (virtchnl_queue_pair_info_t);
561  u8 msg[msg_len];
562  virtchnl_vsi_queue_config_info_t *ci;
563 
564  memset (msg, 0, msg_len);
565  ci = (virtchnl_vsi_queue_config_info_t *) msg;
566  ci->vsi_id = ad->vsi_id;
567  ci->num_queue_pairs = n_qp;
568 
569  for (i = 0; i < n_qp; i++)
570  {
571  virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
572  virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;
573 
574  rxq->vsi_id = ad->vsi_id;
575  rxq->queue_id = i;
576  rxq->max_pkt_size = 1518;
577  if (i < vec_len (ad->rxqs))
578  {
579  avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
580  rxq->ring_len = q->size;
581 	 rxq->databuffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
582 	 rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
583  avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
584  }
585 
586  avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
587  txq->vsi_id = ad->vsi_id;
588  if (i < vec_len (ad->txqs))
589  {
590  txq->queue_id = i;
591  txq->ring_len = q->size;
592  txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
593  }
594  }
595 
596  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
597  0, 0);
598 }
599 
600 clib_error_t *
601 avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
602 {
603  int count = 1;
604  int msg_len = sizeof (virtchnl_irq_map_info_t) +
605  count * sizeof (virtchnl_vector_map_t);
606  u8 msg[msg_len];
607  virtchnl_irq_map_info_t *imi;
608 
609  memset (msg, 0, msg_len);
610  imi = (virtchnl_irq_map_info_t *) msg;
611  imi->num_vectors = count;
612 
613  imi->vecmap[0].vector_id = 1;
614  imi->vecmap[0].vsi_id = ad->vsi_id;
615  imi->vecmap[0].rxq_map = 1;
616  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
617  0);
618 }
619 
620 clib_error_t *
621 avf_op_add_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count, u8 * macs)
622 {
623  int msg_len =
624  sizeof (virtchnl_ether_addr_list_t) +
625  count * sizeof (virtchnl_ether_addr_t);
626  u8 msg[msg_len];
627  virtchnl_ether_addr_list_t *al;
628  int i;
629 
630  memset (msg, 0, msg_len);
631  al = (virtchnl_ether_addr_list_t *) msg;
632  al->vsi_id = ad->vsi_id;
633  al->num_elements = count;
634  for (i = 0; i < count; i++)
635  clib_memcpy (&al->list[i].addr, macs + i * 6, 6);
636  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0,
637  0);
638 }
639 
640 clib_error_t *
641 avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
642 {
643  virtchnl_queue_select_t qs = { 0 };
644  int i;
645  qs.vsi_id = ad->vsi_id;
646  qs.rx_queues = rx;
647  qs.tx_queues = tx;
648  for (i = 0; i < ad->n_rx_queues; i++)
649  {
650  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
651  avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
652  }
653  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
654  sizeof (virtchnl_queue_select_t), 0, 0);
655 }
656 
657 clib_error_t *
658 avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
659 		  virtchnl_eth_stats_t * es)
660 {
661  virtchnl_queue_select_t qs = { 0 };
662  qs.vsi_id = ad->vsi_id;
663  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
664  &qs, sizeof (virtchnl_queue_select_t),
665  es, sizeof (virtchnl_eth_stats_t));
666 }
667 
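/* Ask the PF to reset this VF and poll AVFGEN_RSTAT until the reset state
 * machine reports completion. */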
668 clib_error_t *
669 avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
670 {
671  avf_aq_desc_t d = { 0 };
672  clib_error_t *error;
673  u32 rstat;
674  int n_retry = 20;
675 
676  d.opcode = 0x801;
677  d.v_opcode = VIRTCHNL_OP_RESET_VF;
678  if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
679  return error;
680 
681 retry:
682  vlib_process_suspend (vm, 10e-3);
683  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
684 
685  if (rstat == 2 || rstat == 3)
686  return 0;
687 
688  if (--n_retry == 0)
689  return clib_error_return (0, "reset failed (timeout)");
690 
691  goto retry;
692 }
693 
694 clib_error_t *
695 avf_request_queues (vlib_main_t * vm, avf_device_t * ad, u16 num_queue_pairs)
696 {
697  virtchnl_vf_res_request_t res_req = { 0 };
698  clib_error_t *error;
699  u32 rstat;
700  int n_retry = 20;
701 
702  res_req.num_queue_pairs = num_queue_pairs;
703 
704  error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
705  sizeof (virtchnl_vf_res_request_t), &res_req,
706  sizeof (virtchnl_vf_res_request_t));
707 
708  /*
709  * if the PF responds, the request failed
710  * else the PF initiates a reset and avf_send_to_pf returns an error
711  */
712  if (!error)
713  {
714  return clib_error_return (0, "requested more than %u queue pairs",
715  res_req.num_queue_pairs);
716  }
717 
718 retry:
719  vlib_process_suspend (vm, 10e-3);
720  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
721 
722  if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))
723  goto done;
724 
725  if (--n_retry == 0)
726  return clib_error_return (0, "reset failed (timeout)");
727 
728  goto retry;
729 
730 done:
731  return NULL;
732 }
733 
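/* Full device bring-up over virtchnl: version handshake, resource query,
 * VLAN/promisc setup, RSS, queue and IRQ configuration, MAC add and finally
 * queue enable. */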
734 clib_error_t *
735 avf_device_init (vlib_main_t * vm, avf_main_t * am, avf_device_t * ad,
736 		 avf_create_if_args_t * args)
737 {
738  virtchnl_version_info_t ver = { 0 };
739  virtchnl_vf_resource_t res = { 0 };
740  clib_error_t *error;
741  vlib_thread_main_t *tm = vlib_get_thread_main ();
742  int i;
743 
744  avf_adminq_init (vm, ad);
745 
746  /* request more queues only if we need them */
747  if ((error = avf_request_queues (vm, ad, tm->n_vlib_mains)))
748  {
749  /* we failed to get more queues, but still we want to proceed */
750  clib_error_free (error);
751 
752  if ((error = avf_device_reset (vm, ad)))
753  return error;
754  }
755 
756  avf_adminq_init (vm, ad);
757 
758  /*
759  * OP_VERSION
760  */
761  if ((error = avf_op_version (vm, ad, &ver)))
762  return error;
763 
764  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
765  ver.minor != VIRTCHNL_VERSION_MINOR)
766  return clib_error_return (0, "incompatible protocol version "
767  "(remote %d.%d)", ver.major, ver.minor);
768 
769  /*
770  * OP_GET_VF_RESOURCES
771  */
772  if ((error = avf_op_get_vf_resources (vm, ad, &res)))
773  return error;
774 
775  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
776  return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");
777 
778  ad->vsi_id = res.vsi_res[0].vsi_id;
779  ad->feature_bitmap = res.vf_offload_flags;
780  ad->num_queue_pairs = res.num_queue_pairs;
781  ad->max_vectors = res.max_vectors;
782  ad->max_mtu = res.max_mtu;
783  ad->rss_key_size = res.rss_key_size;
784  ad->rss_lut_size = res.rss_lut_size;
785 
786  clib_memcpy (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);
787 
788  /*
789  * Disable VLAN stripping
790  */
791  if ((error = avf_op_disable_vlan_stripping (vm, ad)))
792  return error;
793 
794  if ((error = avf_config_promisc_mode (vm, ad)))
795  return error;
796 
797  /*
798  * Init Queues
799  */
800  if (args->rxq_num == 0)
801  {
802  args->rxq_num = 1;
803  }
804  else if (args->rxq_num > ad->num_queue_pairs)
805  {
806  args->rxq_num = ad->num_queue_pairs;
807  vlib_log_warn (am->log_class, "Requested more rx queues than "
808 		 "queue pairs available. Using %u rx queues.",
809  args->rxq_num);
810  }
811 
812  for (i = 0; i < args->rxq_num; i++)
813  if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))
814  return error;
815 
816  for (i = 0; i < tm->n_vlib_mains; i++)
817  if ((error = avf_txq_init (vm, ad, i, args->txq_size)))
818  return error;
819 
820  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
821  (error = avf_op_config_rss_lut (vm, ad)))
822  return error;
823 
824  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
825  (error = avf_op_config_rss_key (vm, ad)))
826  return error;
827 
828  if ((error = avf_op_config_vsi_queues (vm, ad)))
829  return error;
830 
831  if ((error = avf_op_config_irq_map (vm, ad)))
832  return error;
833 
834  avf_irq_0_enable (ad);
835  for (i = 0; i < ad->n_rx_queues; i++)
836  avf_irq_n_enable (ad, i);
837 
838  if ((error = avf_op_add_eth_addr (vm, ad, 1, ad->hwaddr)))
839  return error;
840 
841  if ((error = avf_op_enable_queues (vm, ad, ad->n_rx_queues, 0)))
842  return error;
843 
844  if ((error = avf_op_enable_queues (vm, ad, 0, ad->n_tx_queues)))
845  return error;
846 
847  ad->flags |= AVF_DEVICE_F_INITIALIZED;
848  return error;
849 }
850 
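/* Periodic / interrupt-driven housekeeping for one device: check that both
 * admin queues are still enabled, refresh stats when polled, and apply any
 * queued PF events such as link state changes. */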
851 void
852 avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
853 {
854  avf_main_t *am = &avf_main;
855  vnet_main_t *vnm = vnet_get_main ();
856  virtchnl_pf_event_t *e;
857  u32 r;
858 
859  if (ad->flags & AVF_DEVICE_F_ERROR)
860  return;
861 
862  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)
863  return;
864 
865  ASSERT (ad->error == 0);
866 
867  /* do not process device in reset state */
868  r = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
869  if (r != VIRTCHNL_VFR_VFACTIVE)
870  return;
871 
872  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
873  if ((r & 0xf0000000) != (1ULL << 31))
874  {
875  ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
876  goto error;
877  }
878 
879  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
880  if ((r & 0xf0000000) != (1ULL << 31))
881  {
882  ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
883  goto error;
884  }
885 
886  if (is_irq == 0)
887  avf_op_get_stats (vm, ad, &ad->eth_stats);
888 
889  /* *INDENT-OFF* */
890  vec_foreach (e, ad->events)
891  {
892  if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
893 	{
894  int link_up = e->event_data.link_event.link_status;
895  virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;
896  u32 flags = 0;
897 
898  if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
899  {
900  ad->flags |= AVF_DEVICE_F_LINK_UP;
901 	  flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
902 		    VNET_HW_INTERFACE_FLAG_LINK_UP);
903 	  if (speed == VIRTCHNL_LINK_SPEED_40GB)
904 	    flags |= VNET_HW_INTERFACE_FLAG_SPEED_40G;
905 	  else if (speed == VIRTCHNL_LINK_SPEED_25GB)
906 	    flags |= VNET_HW_INTERFACE_FLAG_SPEED_25G;
907 	  else if (speed == VIRTCHNL_LINK_SPEED_10GB)
908 	    flags |= VNET_HW_INTERFACE_FLAG_SPEED_10G;
909 	  else if (speed == VIRTCHNL_LINK_SPEED_1GB)
910 	    flags |= VNET_HW_INTERFACE_FLAG_SPEED_1G;
911 	  else if (speed == VIRTCHNL_LINK_SPEED_100MB)
912 	    flags |= VNET_HW_INTERFACE_FLAG_SPEED_100M;
913 	  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
914  ad->link_speed = speed;
915  }
916  else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
917  {
918  ad->flags &= ~AVF_DEVICE_F_LINK_UP;
919  ad->link_speed = 0;
920  }
921 
922  if (ad->flags & AVF_DEVICE_F_ELOG)
923  {
924  ELOG_TYPE_DECLARE (el) =
925  {
926  .format = "avf[%d] link change: link_status %d "
927  "link_speed %d",
928  .format_args = "i4i1i1",
929  };
930  struct
931  {
932  u32 dev_instance;
933  u8 link_status;
934  u8 link_speed;
935  } *ed;
936  ed = ELOG_DATA (&vm->elog_main, el);
937  ed->dev_instance = ad->dev_instance;
938  ed->link_status = link_up;
939  ed->link_speed = speed;
940  }
941  }
942  else
943  {
944  if (ad->flags & AVF_DEVICE_F_ELOG)
945  {
946  ELOG_TYPE_DECLARE (el) =
947  {
948  .format = "avf[%d] unknown event: event %d severity %d",
949  .format_args = "i4i4i1i1",
950  };
951  struct
952  {
953  u32 dev_instance;
954  u32 event;
955  u32 severity;
956  } *ed;
957  ed = ELOG_DATA (&vm->elog_main, el);
958  ed->dev_instance = ad->dev_instance;
959  ed->event = e->event;
960  ed->severity = e->severity;
961  }
962  }
963  }
964  /* *INDENT-ON* */
965  vec_reset_length (ad->events);
966 
967  return;
968 
969 error:
970  ad->flags |= AVF_DEVICE_F_ERROR;
971  ASSERT (ad->error != 0);
972  vlib_log_err (am->log_class, "%U", format_clib_error, ad->error);
973 }
974 
975 static u32
976 avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
977 {
978  avf_main_t *am = &avf_main;
979  vlib_log_warn (am->log_class, "TODO");
980  return 0;
981 }
982 
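/* Process node: wakes up every ~5 seconds or on an explicit event and runs
 * avf_process_one_device() for every device in the pool. */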
983 static uword
984 avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
985 {
986  avf_main_t *am = &avf_main;
987  avf_device_t *ad;
988  uword *event_data = 0, event_type;
989  int enabled = 0, irq;
990  f64 last_run_duration = 0;
991  f64 last_periodic_time = 0;
992 
993  while (1)
994  {
995  if (enabled)
996  vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
997  else
998 	vlib_process_wait_for_event (vm);
999 
1000  event_type = vlib_process_get_events (vm, &event_data);
1001  vec_reset_length (event_data);
1002  irq = 0;
1003 
1004  switch (event_type)
1005  {
1006  case ~0:
1007  last_periodic_time = vlib_time_now (vm);
1008  break;
1009 	case AVF_PROCESS_EVENT_START:
1010 	  enabled = 1;
1011  break;
1012 	case AVF_PROCESS_EVENT_STOP:
1013 	  enabled = 0;
1014  continue;
1015 	case AVF_PROCESS_EVENT_AQ_INT:
1016 	  irq = 1;
1017  break;
1018  default:
1019  ASSERT (0);
1020  }
1021 
1022  /* *INDENT-OFF* */
1023  pool_foreach (ad, am->devices,
1024  {
1025  avf_process_one_device (vm, ad, irq);
1026  });
1027  /* *INDENT-ON* */
1028  last_run_duration = vlib_time_now (vm) - last_periodic_time;
1029  }
1030  return 0;
1031 }
1032 
1033 /* *INDENT-OFF* */
1034 VLIB_REGISTER_NODE (avf_process_node, static)  = {
1035  .function = avf_process,
1036  .type = VLIB_NODE_TYPE_PROCESS,
1037  .name = "avf-process",
1038 };
1039 /* *INDENT-ON* */
1040 
1041 static void
1042 avf_irq_0_handler (vlib_pci_dev_handle_t h, u16 line)
1043 {
1044  vlib_main_t *vm = vlib_get_main ();
1045  avf_main_t *am = &avf_main;
1046  uword pd = vlib_pci_get_private_data (h);
1047  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1048  u32 icr0;
1049 
1050  icr0 = avf_reg_read (ad, AVFINT_ICR0);
1051 
1052  if (ad->flags & AVF_DEVICE_F_ELOG)
1053  {
1054  /* *INDENT-OFF* */
1055  ELOG_TYPE_DECLARE (el) =
1056  {
1057  .format = "avf[%d] irq 0: icr0 0x%x",
1058  .format_args = "i4i4",
1059  };
1060  /* *INDENT-ON* */
1061  struct
1062  {
1063  u32 dev_instance;
1064  u32 icr0;
1065  } *ed;
1066 
1067  ed = ELOG_DATA (&vm->elog_main, el);
1068  ed->dev_instance = ad->dev_instance;
1069  ed->icr0 = icr0;
1070  }
1071 
1072  avf_irq_0_enable (ad);
1073 
1074  /* bit 30 - Send/Receive Admin queue interrupt indication */
1075  if (icr0 & (1 << 30))
1076  vlib_process_signal_event (vm, avf_process_node.index,
1077 			     AVF_PROCESS_EVENT_AQ_INT, 0);
1078 }
1079 
1080 static void
1081 avf_irq_n_handler (vlib_pci_dev_handle_t h, u16 line)
1082 {
1083  vnet_main_t *vnm = vnet_get_main ();
1084  vlib_main_t *vm = vlib_get_main ();
1085  avf_main_t *am = &avf_main;
1086  uword pd = vlib_pci_get_private_data (h);
1087  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1088  u16 qid;
1089  int i;
1090 
1091  if (ad->flags & AVF_DEVICE_F_ELOG)
1092  {
1093  /* *INDENT-OFF* */
1094  ELOG_TYPE_DECLARE (el) =
1095  {
1096  .format = "avf[%d] irq %d: received",
1097  .format_args = "i4i2",
1098  };
1099  /* *INDENT-ON* */
1100  struct
1101  {
1102  u32 dev_instance;
1103  u16 line;
1104  } *ed;
1105 
1106  ed = ELOG_DATA (&vm->elog_main, el);
1107  ed->dev_instance = ad->dev_instance;
1108  ed->line = line;
1109  }
1110 
1111  qid = line - 1;
1112  if (vec_len (ad->rxqs) > qid && ad->rxqs[qid].int_mode != 0)
1113  vnet_device_input_set_interrupt_pending (vnm, ad->hw_if_index, qid);
1114  for (i = 0; i < vec_len (ad->rxqs); i++)
1115  avf_irq_n_enable (ad, i);
1116 }
1117 
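/* Tear down an interface: remove it from vnet, close the PCI device and free
 * the admin queue memory, descriptor rings and any buffers still enqueued. */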
1118 void
1119 avf_delete_if (vlib_main_t * vm, avf_device_t * ad)
1120 {
1121  vnet_main_t *vnm = vnet_get_main ();
1122  avf_main_t *am = &avf_main;
1123  int i;
1124 
1125  if (ad->hw_if_index)
1126  {
1127  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1128  vnet_hw_interface_unassign_rx_thread (vnm, ad->hw_if_index, 0);
1129  ethernet_delete_interface (vnm, ad->hw_if_index);
1130  }
1131 
1132  vlib_pci_device_close (ad->pci_dev_handle);
1133 
1134  vlib_physmem_free (vm, am->physmem_region, ad->atq);
1135  vlib_physmem_free (vm, am->physmem_region, ad->arq);
1136  vlib_physmem_free (vm, am->physmem_region, ad->atq_bufs);
1137  vlib_physmem_free (vm, am->physmem_region, ad->arq_bufs);
1138 
1139  /* *INDENT-OFF* */
1140  vec_foreach_index (i, ad->rxqs)
1141  {
1142  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
1143  vlib_physmem_free (vm, am->physmem_region, (void *) rxq->descs);
1144  if (rxq->n_enqueued)
1145  vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
1146  rxq->n_enqueued);
1147  vec_free (rxq->bufs);
1148  }
1149  /* *INDENT-ON* */
1150  vec_free (ad->rxqs);
1151 
1152  /* *INDENT-OFF* */
1153  vec_foreach_index (i, ad->txqs)
1154  {
1155  avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
1156  vlib_physmem_free (vm, am->physmem_region, (void *) txq->descs);
1157  if (txq->n_enqueued)
1158  {
1159  u16 first = (txq->next - txq->n_enqueued) & (txq->size -1);
1160  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
1161  txq->n_enqueued);
1162  }
1163  vec_free (txq->bufs);
1164  }
1165  /* *INDENT-ON* */
1166  vec_free (ad->txqs);
1167 
1168  clib_error_free (ad->error);
1169  memset (ad, 0, sizeof (*ad));
1170  pool_put (am->devices, ad);
1171 }
1172 
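/* Create an AVF interface: open and map the PCI VF, register MSI-X handlers,
 * allocate DMA memory for the admin queues, run avf_device_init() and register
 * the resulting ethernet interface with vnet. */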
1173 void
1174 avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
1175 {
1176  vnet_main_t *vnm = vnet_get_main ();
1177  avf_main_t *am = &avf_main;
1178  avf_device_t *ad;
1179  vlib_pci_dev_handle_t h;
1180  clib_error_t *error = 0;
1181  int i;
1182 
1183  /* check input args */
1184  args->rxq_size = (args->rxq_size == 0) ? AVF_RXQ_SZ : args->rxq_size;
1185  args->txq_size = (args->txq_size == 0) ? AVF_TXQ_SZ : args->txq_size;
1186 
1187  if ((args->rxq_size & (args->rxq_size - 1))
1188  || (args->txq_size & (args->txq_size - 1)))
1189  {
1190  args->rv = VNET_API_ERROR_INVALID_VALUE;
1191  args->error =
1192  clib_error_return (error, "queue size must be a power of two");
1193  return;
1194  }
1195 
1196  pool_get (am->devices, ad);
1197  ad->dev_instance = ad - am->devices;
1198  ad->per_interface_next_index = ~0;
1199 
1200  if (args->enable_elog)
1201  ad->flags |= AVF_DEVICE_F_ELOG;
1202 
1203  if ((error = vlib_pci_device_open (&args->addr, avf_pci_device_ids, &h)))
1204  {
1205  pool_put (am->devices, ad);
1206  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1207  args->error =
1208  clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
1209  &args->addr);
1210  return;
1211  }
1212  ad->pci_dev_handle = h;
1213 
1214  vlib_pci_set_private_data (h, ad->dev_instance);
1215 
1216  if ((error = vlib_pci_bus_master_enable (h)))
1217  goto error;
1218 
1219  if ((error = vlib_pci_map_region (h, 0, &ad->bar0)))
1220  goto error;
1221 
1222  if ((error = vlib_pci_register_msix_handler (h, 0, 1, &avf_irq_0_handler)))
1223  goto error;
1224 
1225  if ((error = vlib_pci_register_msix_handler (h, 1, 1, &avf_irq_n_handler)))
1226  goto error;
1227 
1228  if ((error = vlib_pci_enable_msix_irq (h, 0, 2)))
1229  goto error;
1230 
1231  if (am->physmem_region_alloc == 0)
1232  {
1233  u32 flags = VLIB_PHYSMEM_F_INIT_MHEAP | VLIB_PHYSMEM_F_HUGETLB;
1234  error = vlib_physmem_region_alloc (vm, "avf descriptors", 4 << 20, 0,
1235  flags, &am->physmem_region);
1236  if (error)
1237  goto error;
1238  am->physmem_region_alloc = 1;
1239  }
1240  ad->atq = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
1241  sizeof (avf_aq_desc_t) * AVF_MBOX_LEN,
1242  64);
1243  if (error)
1244  goto error;
1245 
1246  ad->arq = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
1247  sizeof (avf_aq_desc_t) * AVF_MBOX_LEN,
1248  64);
1249  if (error)
1250  goto error;
1251 
1252  ad->atq_bufs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
1253  AVF_MBOX_BUF_SZ * AVF_MBOX_LEN,
1254  64);
1255  if (error)
1256  goto error;
1257 
1258  ad->arq_bufs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
1259  AVF_MBOX_BUF_SZ * AVF_MBOX_LEN,
1260  64);
1261  if (error)
1262  goto error;
1263 
1264  if ((error = vlib_pci_intr_enable (h)))
1265  goto error;
1266 
1267  /* FIXME detect */
1268  ad->flags |= AVF_DEVICE_F_IOVA;
1269 
1270  if ((error = avf_device_init (vm, am, ad, args)))
1271  goto error;
1272 
1273  /* create interface */
1274  error = ethernet_register_interface (vnm, avf_device_class.index,
1275  ad->dev_instance, ad->hwaddr,
1276 				       &ad->hw_if_index, avf_flag_change);
1277 
1278  if (error)
1279  goto error;
1280 
1281  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
1282  args->sw_if_index = ad->sw_if_index = sw->sw_if_index;
1283 
1284  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
1285  hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
1286  vnet_hw_interface_set_input_node (vnm, ad->hw_if_index,
1287 				    avf_input_node.index);
1288 
1289  for (i = 0; i < ad->n_rx_queues; i++)
1290  vnet_hw_interface_assign_rx_thread (vnm, ad->hw_if_index, i, ~0);
1291 
1292  if (pool_elts (am->devices) == 1)
1293  vlib_process_signal_event (vm, avf_process_node.index,
1294 			     AVF_PROCESS_EVENT_START, 0);
1295 
1296  return;
1297 
1298 error:
1299  avf_delete_if (vm, ad);
1300  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1301  args->error = clib_error_return (error, "pci-addr %U",
1302  format_vlib_pci_addr, &args->addr);
1303  vlib_log_err (am->log_class, "%U", format_clib_error, args->error);
1304 }
1305 
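/* Example (sketch) of how a caller, e.g. a CLI handler, might use the API
 * above; only fields referenced in this file are shown and error handling is
 * minimal:
 *
 *   avf_create_if_args_t args = { 0 };
 *   args.addr = pci_addr;            // vlib_pci_addr_t of the VF
 *   args.rxq_size = 0;               // 0 selects AVF_RXQ_SZ
 *   args.txq_size = 0;               // 0 selects AVF_TXQ_SZ
 *   avf_create_if (vm, &args);
 *   if (args.rv != 0)
 *     clib_error_report (args.error);
 */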
1306 static clib_error_t *
1307 avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
1308 {
1309  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
1310  avf_main_t *am = &avf_main;
1311  avf_device_t *ad = pool_elt_at_index (am->devices, hi->dev_instance);
1312  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
1313 
1314  if (ad->flags & AVF_DEVICE_F_ERROR)
1315  return clib_error_return (0, "device is in error state");
1316 
1317  if (is_up)
1318  {
1319  vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
1320 			       VNET_HW_INTERFACE_FLAG_LINK_UP);
1321  ad->flags |= AVF_DEVICE_F_ADMIN_UP;
1322  }
1323  else
1324  {
1325  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1326  ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
1327  }
1328  return 0;
1329 }
1330 
1331 static clib_error_t *
1332 avf_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
1333 			      vnet_hw_interface_rx_mode mode)
1334 {
1335  avf_main_t *am = &avf_main;
1336  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1337  avf_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
1338  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
1339 
1340  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
1341  rxq->int_mode = 0;
1342  else
1343  rxq->int_mode = 1;
1344 
1345  return 0;
1346 }
1347 
1348 static void
1349 avf_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
1350 			     u32 node_index)
1351 {
1352  avf_main_t *am = &avf_main;
1353  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1354  avf_device_t *ad = pool_elt_at_index (am->devices, hw->dev_instance);
1355 
1356  /* Shut off redirection */
1357  if (node_index == ~0)
1358  {
1359  ad->per_interface_next_index = node_index;
1360  return;
1361  }
1362 
1363  ad->per_interface_next_index =
1364  vlib_node_add_next (vlib_get_main (), avf_input_node.index, node_index);
1365 }
1366 
1367 static char *avf_tx_func_error_strings[] = {
1368 #define _(n,s) s,
1369  foreach_avf_tx_func_error
1370 #undef _
1371 };
1372 
1373 /* *INDENT-OFF* */
1374 VNET_DEVICE_CLASS (avf_device_class,) =
1375 {
1376  .name = "Adaptive Virtual Function (AVF) interface",
1377  .format_device = format_avf_device,
1378  .format_device_name = format_avf_device_name,
1379  .admin_up_down_function = avf_interface_admin_up_down,
1380  .rx_mode_change_function = avf_interface_rx_mode_change,
1381  .rx_redirect_to_node = avf_set_interface_next_node,
1382  .tx_function_n_errors = AVF_TX_N_ERROR,
1383  .tx_function_error_strings = avf_tx_func_error_strings,
1384 };
1385 /* *INDENT-ON* */
1386 
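/* Plugin init: builds the packet type (ptype) table that maps the descriptor
 * ptype field to a next node and buffer flags, and registers the log class. */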
1387 clib_error_t *
1388 avf_init (vlib_main_t * vm)
1389 {
1390  avf_main_t *am = &avf_main;
1391  clib_error_t *error;
1392  vlib_thread_main_t *tm = vlib_get_thread_main ();
1393  int i;
1394 
1395  if ((error = vlib_call_init_function (vm, pci_bus_init)))
1396  return error;
1397 
1398  vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
1399 			CLIB_CACHE_LINE_BYTES);
1400 
1401  /* initialize ptype based lookup table */
1402  vec_validate_aligned (am->ptypes, 255, CLIB_CACHE_LINE_BYTES);
1403 
1404  /* *INDENT-OFF* */
1405  vec_foreach_index (i, am->ptypes)
1406  {
1407  avf_ptype_t *p = vec_elt_at_index (am->ptypes, i);
1408  if ((i >= 22) && (i <= 87))
1409  {
1410 	 p->next_node = VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
1411 	 p->flags = VNET_BUFFER_F_IS_IP4;
1412  }
1413  else if ((i >= 88) && (i <= 153))
1414  {
1415 	 p->next_node = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
1416 	 p->flags = VNET_BUFFER_F_IS_IP6;
1417  }
1418  else
1419 	p->next_node = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
1420  p->buffer_advance = device_input_next_node_advance[p->next_node];
1421  p->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
1422  }
1423  /* *INDENT-ON* */
1424 
1425  am->log_class = vlib_log_register_class ("avf_plugin", 0);
1426  vlib_log_debug (am->log_class, "initialized");
1427 
1428  return 0;
1429 }
1430 
1431 VLIB_INIT_FUNCTION (avf_init);
1432 
1433 /*
1434  * fd.io coding-style-patch-verification: ON
1435  *
1436  * Local Variables:
1437  * eval: (c-set-style "gnu")
1438  * End:
1439  */