#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>

#define VHOST_DEBUG_VQ 0

#define DBG_SOCK(args...)                       \
  {                                             \
    vhost_user_main_t *_vum = &vhost_user_main; \
    if (_vum->debug)                            \
      clib_warning (args);                      \
  };

#if VHOST_DEBUG_VQ == 1
#define DBG_VQ(args...) clib_warning(args);
#else
#define DBG_VQ(args...)
#endif

#define VHOST_USER_DOWN_DISCARD_COUNT 256
#define VHOST_USER_RX_BUFFER_STARVATION 32
#define VHOST_USER_RX_COPY_THRESHOLD 64
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)

#define UNIX_GET_FD(unixfd_idx) \
    (unixfd_idx != ~0) ? \
        pool_elt_at_index (unix_main.file_pool, unixfd_idx)->file_descriptor : -1;

#define foreach_virtio_trace_flags                      \
  _ (SIMPLE_CHAINED, 0, "Simple descriptor chaining")   \
  _ (SINGLE_DESC, 1, "Single descriptor packet")        \
  _ (INDIRECT, 2, "Indirect descriptor")                \
  _ (MAP_ERROR, 4, "Memory mapping error")

#define _(n,i,s) VIRTIO_TRACE_F_##n,

#define foreach_vhost_user_tx_func_error                                 \
  _(NONE, "no error")                                                    \
  _(NOT_READY, "vhost vring not ready")                                  \
  _(DOWN, "vhost interface is down")                                     \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")        \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")        \
  _(MMAP_FAIL, "mmap failure")                                           \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,

#define foreach_vhost_user_input_func_error                              \
  _(NO_ERROR, "no error")                                                \
  _(NO_BUFFER, "no available buffer")                                    \
  _(MMAP_FAIL, "mmap failure")                                           \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table")            \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,

VNET_HW_INTERFACE_CLASS (vhost_interface_class, static) = {
  .name = "vhost-user",
};
u32 show_dev_instance = ~0;

if (show_dev_instance != ~0)
  i = show_dev_instance;

s = format (s, "VirtualEthernet0/0/%d", i);

DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
/* map_guest_mem(): translate a guest-physical address to a host pointer */
((vui->regions[i].guest_phys_addr +
  vui->regions[i].memory_size) > addr)))

__m128i rl, rh, al, ah, r;
al = _mm_set1_epi64x (addr + 1);
ah = _mm_set1_epi64x (addr);

rl = _mm_cmpgt_epi64 (al, rl);
rh = _mm_cmpgt_epi64 (rh, ah);
r = _mm_and_si128 (rl, rh);

rl = _mm_cmpgt_epi64 (al, rl);
rh = _mm_cmpgt_epi64 (rh, ah);
r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);

rl = _mm_cmpgt_epi64 (al, rl);
rh = _mm_cmpgt_epi64 (rh, ah);
r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);

rl = _mm_cmpgt_epi64 (al, rl);
rh = _mm_cmpgt_epi64 (rh, ah);
r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);

r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
i = __builtin_ctzll (_mm_movemask_epi8 (r) |
if ((vui->regions[i].guest_phys_addr <= addr) &&

DBG_VQ ("failed to map guest mem addr %llx", addr);

if ((vui->regions[i].userspace_addr <= addr) &&
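/*
 * Hedged sketch (not part of the original file): the scalar equivalent of the
 * SSE lookup above, assuming a simple region array with guest_phys_addr,
 * memory_size and a host mmap base per region.  region_t and find_region()
 * are hypothetical names used only for illustration.
 */
#include <stdint.h>
#include <stddef.h>

typedef struct
{
  uint64_t guest_phys_addr;	/* guest-physical start of the region */
  uint64_t memory_size;		/* length of the region in bytes */
  void *mmap_addr;		/* where the region is mapped in this process */
} region_t;

/* Return a host pointer for a guest-physical address, or NULL if unmapped. */
static void *
find_region (region_t * regions, int nregions, uint64_t addr)
{
  for (int i = 0; i < nregions; i++)
    if (regions[i].guest_phys_addr <= addr &&
	addr < regions[i].guest_phys_addr + regions[i].memory_size)
      return (uint8_t *) regions[i].mmap_addr +
	(addr - regions[i].guest_phys_addr);
  return NULL;
}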
/* unmap_all_mem_regions() */
ssize_t map_sz = (vui->regions[i].memory_size +
		  page_sz - 1) & ~(page_sz - 1);

DBG_SOCK ("unmap memory region %d addr 0x%lx len 0x%lx page_sz 0x%x", i,

clib_warning ("failed to unmap memory region (errno %d)",
u32 thread_index = 0;

if (thread_index == 0)

for (thread_index = 0;

vec_foreach (queue, vui->rx_queues)

rv = vnet_hw_interface_unassign_rx_thread (vnm, vui->hw_if_index,

clib_warning ("Warning: unable to unassign interface %d, "
	      "queue %d: rc=%d", vui->hw_if_index, *queue, rv);

for (qid = 0; qid < VHOST_VRING_MAX_N / 2; qid++)

txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_UNKNOWN)
  txvq->mode = VNET_HW_INTERFACE_RX_MODE_POLLING;
vec_add1 (vui->rx_queues, qid);

vnet_hw_interface_set_input_node (vnm, vui->hw_if_index,
				  vhost_user_input_node.index);
vec_foreach (queue, vui->rx_queues)

vnet_hw_interface_assign_rx_thread (vnm, vui->hw_if_index, *queue,

txvq = &vui->vrings[VHOST_VRING_IDX_TX (*queue)];
rv = vnet_hw_interface_set_rx_mode (vnm, vui->hw_if_index, *queue,

clib_warning ("Warning: unable to set rx mode for interface %d, "
	      "queue %d: rc=%d", vui->hw_if_index, *queue, rv);
int i, found[2] = { };

return found[0] && found[1];

if (is_up != vui->is_up)

is_up ? "ready" : "down");

__attribute__ ((unused)) int n;

__attribute__ ((unused)) int n;

return __sync_lock_test_and_set (vui->vring_locks[qid], 1);

memset (vring, 0, sizeof (*vring));

if (qid == 0 || qid == 1)

if (vring->errfd != -1)
  close (vring->errfd);
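/*
 * Hedged sketch (illustration only): the per-vring lock used above is a
 * plain test-and-set spinlock built on the GCC __sync_lock_test_and_set /
 * __sync_lock_release builtins; the "try" variant returns the previous
 * value, the blocking variant spins until it observes 0.  spin_lock_t and
 * the helper names are hypothetical.
 */
typedef volatile unsigned spin_lock_t;

static inline int
spin_trylock (spin_lock_t * l)
{
  /* returns 0 when the lock was acquired, 1 when it was already held */
  return __sync_lock_test_and_set (l, 1);
}

static inline void
spin_lock (spin_lock_t * l)
{
  while (__sync_lock_test_and_set (l, 1))
    ;				/* spin until the previous value was 0 */
}

static inline void
spin_unlock (spin_lock_t * l)
{
  __sync_lock_release (l);	/* store 0 with release semantics */
}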
#define VHOST_LOG_PAGE 0x1000

|| !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL))))

DBG_SOCK ("vhost_user_log_dirty_pages(): out of range\n");

#define vhost_user_log_dirty_ring(vui, vq, member) \
  if (PREDICT_FALSE(vq->log_used)) { \
    vhost_user_log_dirty_pages(vui, vq->log_guest_addr + STRUCT_OFFSET_OF(vring_used_t, member), \
			       sizeof(vq->used->member)); \
  }
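/*
 * Hedged sketch (illustration, not the original implementation): how a
 * dirty-log bitmap is typically updated when VHOST_F_LOG_ALL is negotiated.
 * One bit covers one VHOST_LOG_PAGE-sized (4 KiB) page of guest memory; the
 * macro above logs the used-ring fields it touches.  log_base/log_size are
 * hypothetical parameters.
 */
#include <stdint.h>

#define LOG_PAGE 0x1000ULL

static void
log_dirty_pages (volatile uint8_t * log_base, uint64_t log_size,
		 uint64_t addr, uint64_t len)
{
  for (uint64_t page = addr / LOG_PAGE; page <= (addr + len - 1) / LOG_PAGE;
       page++)
    {
      if (page / 8 >= log_size)	/* out of range, as checked above */
	return;
      log_base[page / 8] |= 1 << (page % 8);	/* mark the page dirty */
    }
}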
/* vhost_user_socket_read(): vhost-user protocol message handler */
int fd, number_of_fds = 0;
vhost_user_msg_t msg;
struct cmsghdr *cmsg;

memset (&mh, 0, sizeof (mh));
memset (control, 0, sizeof (control));

iov[0].iov_base = (void *) &msg;

mh.msg_control = control;
mh.msg_controllen = sizeof (control);

DBG_SOCK ("recvmsg returned error %d %s", errno, strerror (errno));

DBG_SOCK ("n (%d) != VHOST_USER_MSG_HDR_SZ (%d)",

if (mh.msg_flags & MSG_CTRUNC)

cmsg = CMSG_FIRSTHDR (&mh);

if (cmsg && (cmsg->cmsg_len > 0) && (cmsg->cmsg_level == SOL_SOCKET) &&
    (cmsg->cmsg_type == SCM_RIGHTS) &&
    (cmsg->cmsg_len - CMSG_LEN (0) <=
     VHOST_MEMORY_MAX_NREGIONS * sizeof (int)))
  {
    number_of_fds = (cmsg->cmsg_len - CMSG_LEN (0)) / sizeof (int);
    clib_memcpy (fds, CMSG_DATA (cmsg), number_of_fds * sizeof (int));
  }
if ((msg.flags & 7) != 1)

DBG_SOCK ("malformed message received. closing socket");

DBG_SOCK ("read failed %s", strerror (errno));

else if (rv != msg.size)

DBG_SOCK ("message too short (read %dB should be %dB)", rv, msg.size);
msg.u64 = (1ULL << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
  (1ULL << FEAT_VIRTIO_NET_F_CTRL_VQ) |
  (1ULL << FEAT_VIRTIO_F_ANY_LAYOUT) |
  (1ULL << FEAT_VIRTIO_F_INDIRECT_DESC) |
  (1ULL << FEAT_VHOST_F_LOG_ALL) |
  (1ULL << FEAT_VIRTIO_NET_F_GUEST_ANNOUNCE) |
  (1ULL << FEAT_VIRTIO_NET_F_MQ) |
  (1ULL << FEAT_VHOST_USER_F_PROTOCOL_FEATURES) |
  (1ULL << FEAT_VIRTIO_F_VERSION_1);

msg.size = sizeof (msg.u64);
DBG_SOCK ("if %d msg VHOST_USER_GET_FEATURES - reply 0x%016llx",

DBG_SOCK ("if %d msg VHOST_USER_SET_FEATURES features 0x%016llx",

((1 << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
 (1ULL << FEAT_VIRTIO_F_VERSION_1)))

(vui->features & (1 << FEAT_VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
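/*
 * Hedged sketch (illustration): the virtio-net header that precedes every
 * packet is 10 bytes, or 12 bytes when VIRTIO_NET_F_MRG_RXBUF or
 * VIRTIO_F_VERSION_1 is negotiated, because the mergeable-buffer variant
 * appends a num_buffers field; the checks above pick the size from the
 * negotiated feature bits.  hdr_size_for_features() is a hypothetical helper.
 */
#include <stdint.h>

struct net_hdr
{
  uint8_t flags;
  uint8_t gso_type;
  uint16_t hdr_len;
  uint16_t gso_size;
  uint16_t csum_start;
  uint16_t csum_offset;
};				/* 10 bytes */

struct net_hdr_mrg_rxbuf
{
  struct net_hdr hdr;
  uint16_t num_buffers;		/* only with MRG_RXBUF / VERSION_1 */
};				/* 12 bytes */

static int
hdr_size_for_features (uint64_t features, int mrg_rxbuf_bit, int version_1_bit)
{
  return (features & ((1ULL << mrg_rxbuf_bit) | (1ULL << version_1_bit)))
    ? (int) sizeof (struct net_hdr_mrg_rxbuf) : (int) sizeof (struct net_hdr);
}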
DBG_SOCK ("if %d msg VHOST_USER_SET_MEM_TABLE nregions %d",

if ((msg.memory.nregions < 1) ||
    (msg.memory.nregions > VHOST_MEMORY_MAX_NREGIONS))

DBG_SOCK ("number of mem regions must be between 1 and %i",
	  VHOST_MEMORY_MAX_NREGIONS);

if (msg.memory.nregions != number_of_fds)

DBG_SOCK ("each memory region must have FD");

for (i = 0; i < msg.memory.nregions; i++)

	sizeof (vhost_user_memory_region_t));

ssize_t map_sz = (vui->regions[i].memory_size +
		  page_sz - 1) & ~(page_sz - 1);

	MAP_SHARED, fds[i], 0);

DBG_SOCK ("map memory region %d addr 0 len 0x%lx fd %d mapped 0x%lx "

clib_warning ("failed to map memory. errno is %d", errno);

vui->nregions = msg.memory.nregions;
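/*
 * Hedged sketch (illustration): each region announced in SET_MEM_TABLE comes
 * with an fd received over SCM_RIGHTS; one common way to bring it in is to
 * mmap() the whole slice, rounding the length up to the page size, and then
 * add mmap_offset to reach the start of the region.  map_region() is a
 * hypothetical helper, not the original code.
 */
#include <stdint.h>
#include <sys/mman.h>

static void *
map_region (int fd, uint64_t memory_size, uint64_t mmap_offset,
	    uint64_t page_sz)
{
  /* round the mapping length up to a whole number of pages */
  size_t map_sz = (memory_size + mmap_offset + page_sz - 1)
    & ~(page_sz - 1);
  void *addr = mmap (0, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (addr == MAP_FAILED)
    return 0;
  return (uint8_t *) addr + mmap_offset;	/* start of the guest region */
}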
DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_NUM idx %d num %d",

if ((msg.state.num > 32768) ||
    (msg.state.num == 0) ||
    ((msg.state.num - 1) & msg.state.num))

vui->vrings[msg.state.index].qsz = msg.state.num;
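/*
 * Hedged sketch: the sanity check above rejects a queue size of 0, anything
 * above 32768, and any value that is not a power of two; "(num - 1) & num"
 * is non-zero exactly when num has more than one bit set.
 */
static int
valid_vring_size (unsigned num)
{
  return num != 0 && num <= 32768 && ((num - 1) & num) == 0;
}

/* valid_vring_size (256) == 1, valid_vring_size (100) == 0 */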
DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ADDR idx %d",

DBG_SOCK ("invalid vring index VHOST_USER_SET_VRING_ADDR:"

if (msg.size < sizeof (msg.addr))

DBG_SOCK ("vhost message is too short (%d < %d)",
	  msg.size, sizeof (msg.addr));

vui->vrings[msg.state.index].desc = (vring_desc_t *)
vui->vrings[msg.state.index].used = (vring_used_t *)
vui->vrings[msg.state.index].avail = (vring_avail_t *)

DBG_SOCK ("failed to map user memory for hw_if_index %d",

if (!(vui->features & (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES)))
DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_CALL %d",

q = (u8) (msg.u64 & 0xFF);

if (!(msg.u64 & 0x100))

if (number_of_fds != 1)

DBG_SOCK ("More than one fd received !");

template.file_descriptor = fds[0];
template.private_data =

DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_KICK %d",

q = (u8) (msg.u64 & 0xFF);

if (!(msg.u64 & 0x100))

if (number_of_fds != 1)

DBG_SOCK ("More than one fd received !");

template.file_descriptor = fds[0];
template.private_data =
DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ERR %d",

q = (u8) (msg.u64 & 0xFF);

if (!(msg.u64 & 0x100))

if (number_of_fds != 1)

DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
	  vui->hw_if_index, msg.state.index, msg.state.num);
DBG_SOCK ("invalid vring index VHOST_USER_GET_VRING_BASE:"

msg.size = sizeof (msg.state);

DBG_SOCK ("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
	  vui->hw_if_index, msg.state.index, msg.state.num);
if (msg.size != sizeof (msg.log))

DBG_SOCK ("invalid msg size for VHOST_USER_SET_LOG_BASE: %d instead of %d",
	  msg.size, sizeof (msg.log));

DBG_SOCK ("VHOST_USER_PROTOCOL_F_LOG_SHMFD not set but VHOST_USER_SET_LOG_BASE received");

(msg.log.size + msg.log.offset + page_sz - 1) & ~(page_sz - 1);

vui->log_base_addr = mmap (0, map_sz, PROT_READ | PROT_WRITE,

DBG_SOCK ("map log region addr 0 len 0x%lx off 0x%lx fd %d mapped 0x%lx",

clib_warning ("failed to map memory. errno is %d", errno);
msg.size = sizeof (msg.u64);

msg.size = sizeof (msg.u64);
DBG_SOCK ("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES - reply 0x%016llx",

DBG_SOCK ("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES features 0x%016llx",

msg.size = sizeof (msg.u64);
DBG_SOCK ("if %d msg VHOST_USER_GET_QUEUE_NUM - reply %d",

DBG_SOCK ("if %d VHOST_USER_SET_VRING_ENABLE: %s queue %d",
	  vui->hw_if_index, msg.state.num ? "enable" : "disable",

DBG_SOCK ("invalid vring index VHOST_USER_SET_VRING_ENABLE:"

DBG_SOCK ("unknown vhost-user message %d received. closing socket",

DBG_SOCK ("could not send message response");
int client_fd, client_len;
struct sockaddr_un client;

client_len = sizeof (client);

		    (struct sockaddr *) &client,
		    (socklen_t *) & client_len);

template.file_descriptor = client_fd;
if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
  s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);

s = format (s, "%U virtio_net_hdr first_desc_len %u\n",

s = format (s, "%U flags 0x%02x gso_type %u\n",
	    t->hdr.hdr.flags, t->hdr.hdr.gso_type);

s = format (s, "%U num_buff %u",

u32 qsz_mask = txvq->qsz - 1;

u32 desc_current = txvq->avail->ring[last_avail_idx & qsz_mask];
vring_desc_t *hdr_desc = 0;
virtio_net_hdr_mrg_rxbuf_t *hdr;

memset (t, 0, sizeof (*t));

hdr_desc = &txvq->desc[desc_current];

if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
    !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))

if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))

memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
rv = write (fd, &x, sizeof (x));

  ("Error: Could not write to unix socket for callfd %d", fd);

static_always_inline u32
vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		       u16 copy_len, u32 * map_hint)

void *src0, *src1, *src2, *src3;

clib_memcpy ((void *) cpy[0].dst, src0, cpy[0].len);
clib_memcpy ((void *) cpy[1].dst, src1, cpy[1].len);
/* vhost_user_rx_discard_packet(): drain the guest TX ring (VPP RX path) */
u32 discarded_packets = 0;

u16 qsz_mask = txvq->qsz - 1;
while (discarded_packets != discard_max)

u16 desc_chain_head =

discarded_packets++;

return discarded_packets;

/* vhost_user_input_rewind_buffers() */
b_current->flags = 0;
while (b_current != b_head)

b_current->flags = 0;
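/*
 * Hedged sketch (illustration): discarding input, as done above when the
 * interface is down, means consuming entries from the guest's avail ring and
 * immediately completing them on the used ring with a zero length, so the
 * guest can reclaim its buffers without any data being copied.  The ring
 * layout below is simplified and all names are hypothetical.
 */
#include <stdint.h>

typedef struct
{
  uint32_t id;			/* head index of the completed chain */
  uint32_t len;			/* bytes written back (0 when discarding) */
} used_elem_t;

typedef struct
{
  uint16_t qsz;			/* ring size, power of two */
  uint16_t last_avail_idx;	/* next avail entry to consume */
  uint16_t *avail_ring;		/* guest-filled: chain head indices */
  uint16_t last_used_idx;	/* next used entry to fill */
  used_elem_t *used_ring;	/* host-filled: completed chains */
} ring_t;

static uint32_t
discard_avail_entries (ring_t * r, uint16_t avail_idx, uint32_t discard_max)
{
  uint32_t discarded = 0;
  uint16_t mask = r->qsz - 1;
  while (discarded != discard_max && r->last_avail_idx != avail_idx)
    {
      uint16_t head = r->avail_ring[r->last_avail_idx & mask];
      r->used_ring[r->last_used_idx & mask].id = head;
      r->used_ring[r->last_used_idx & mask].len = 0;
      r->last_avail_idx++;
      r->last_used_idx++;
      discarded++;
    }
  return discarded;
}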
/* vhost_user_if_input(): receive path (the guest TX vring is VPP's RX) */
u16 n_rx_packets = 0;

u32 n_left_to_next, *to_next;

txvq->used->flags = 0;

  VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);

qsz_mask = txvq->qsz - 1;

  interface_main.sw_if_counters +

  VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);

while (n_left > 0 && n_left_to_next > 0)

u32 desc_data_offset;
vring_desc_t *desc_table = txvq->desc;

to_next[0] = bi_current;

   cpus[thread_index].rx_buffers)
  [vum->cpus[thread_index].rx_buffers_len - 1], LOAD);

  VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);

desc_data_offset = desc_table[desc_current].len;

if (desc_data_offset == desc_table[desc_current].len)

desc_current = desc_table[desc_current].next;
desc_data_offset = 0;

bi_current = bi_next;

  desc_table[desc_current].len - desc_data_offset;

cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;

cpy->src = desc_table[desc_current].addr + desc_data_offset;

desc_data_offset += cpy->len;

u32 bi = to_next[-1];

  to_next, n_left_to_next,

  copy_len, &map_hint)))

  VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);

  copy_len, &map_hint)))

  VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);

return n_rx_packets;
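/*
 * Hedged sketch (standalone illustration): the input loop above walks a
 * virtio descriptor chain, following the "next" index while the NEXT flag is
 * set, and copies each descriptor's guest data into host buffers.  The
 * types and the gpa_to_host() translation below are simplified stand-ins.
 */
#include <stdint.h>
#include <string.h>

#define DESC_F_NEXT 1

struct ring_desc
{
  uint64_t addr;		/* guest-physical address of the data */
  uint32_t len;			/* length of the data */
  uint16_t flags;		/* DESC_F_NEXT, indirect, ... */
  uint16_t next;		/* index of the next descriptor in the chain */
};

/* Translate a guest-physical address; returns NULL if it is not mapped. */
extern void *gpa_to_host (uint64_t gpa);

/* Copy a whole descriptor chain into dst; return bytes copied or -1 on error. */
static long
copy_desc_chain (struct ring_desc *table, uint16_t head,
		 uint8_t * dst, size_t dst_len)
{
  size_t total = 0;
  uint16_t i = head;
  while (1)
    {
      void *src = gpa_to_host (table[i].addr);
      if (!src || total + table[i].len > dst_len)
	return -1;		/* mapping failure or destination too small */
      memcpy (dst + total, src, table[i].len);
      total += table[i].len;
      if (!(table[i].flags & DESC_F_NEXT))
	break;
      i = table[i].next;
    }
  return (long) total;
}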
uword n_rx_packets = 0;

(node->state == VLIB_NODE_STATE_POLLING))

return n_rx_packets;

/* VLIB_REGISTER_NODE (vhost_user_input_node) */
.name = "vhost-user-input",
.sibling_of = "device-input",
.state = VLIB_NODE_STATE_DISABLED,
u32 qsz_mask = rxvq->qsz - 1;

u32 desc_current = rxvq->avail->ring[last_avail_idx & qsz_mask];
vring_desc_t *hdr_desc = 0;

memset (t, 0, sizeof (*t));

hdr_desc = &rxvq->desc[desc_current];

if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
    !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))

static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		    u16 copy_len, u32 * map_hint)

void *dst0, *dst1, *dst2, *dst3;

clib_memcpy (dst0, (void *) cpy[0].src, cpy[0].len);
clib_memcpy (dst1, (void *) cpy[1].src, cpy[1].len);
/* vhost_user_tx(): transmit path (VPP TX fills the guest RX vring) */
error = VHOST_USER_TX_FUNC_ERROR_DOWN;

error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;

  (vui->per_cpu_tx_qid, thread_index));
rxvq = &vui->vrings[qid];

qsz_mask = rxvq->qsz - 1;

error = VHOST_USER_TX_FUNC_ERROR_NONE;

u16 desc_head, desc_index, desc_len;
vring_desc_t *desc_table;
uword buffer_map_addr;

  vui, qid / 2, b0, rxvq);

error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;

desc_table = rxvq->desc;
desc_head = desc_index =

  (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))

error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;

error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;

desc_len = vui->virtio_net_hdr_sz;
buffer_map_addr = desc_table[desc_index].addr;
buffer_len = desc_table[desc_index].len;

virtio_net_hdr_mrg_rxbuf_t *hdr =

hdr->hdr.gso_type = 0;
hdr->num_buffers = 1;

cpy->len = vui->virtio_net_hdr_sz;
cpy->dst = buffer_map_addr;

buffer_map_addr += vui->virtio_net_hdr_sz;
buffer_len -= vui->virtio_net_hdr_sz;

if (buffer_len == 0)

desc_index = desc_table[desc_index].next;
buffer_map_addr = desc_table[desc_index].addr;
buffer_len = desc_table[desc_index].len;

else if (vui->virtio_net_hdr_sz == 12)

virtio_net_hdr_mrg_rxbuf_t *hdr =

error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;

desc_table = rxvq->desc;
desc_head = desc_index =

  (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))

  (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))

error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;

  rxvq->desc[desc_index].addr,

error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;

buffer_map_addr = desc_table[desc_index].addr;
buffer_len = desc_table[desc_index].len;

error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;

cpy->len = bytes_left;
cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
cpy->dst = buffer_map_addr;

bytes_left -= cpy->len;
buffer_len -= cpy->len;
buffer_map_addr += cpy->len;
desc_len += cpy->len;

  copy_len, &map_hint)))

  VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

  copy_len, &map_hint)))

  VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)

if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))

  thread_index, vui->sw_if_index, n_left);
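/*
 * Hedged sketch (illustration): both directions batch their memcpy work into
 * a small array of {dst, src, len} records and flush it through one copy
 * routine when it gets close to full (the *_COPY_THRESHOLD defines near the
 * top of the file), so the descriptor walk and the data movement stay in
 * separate tight loops.  copy_entry_t, add_copy() and COPY_ARRAY_N are
 * hypothetical names.
 */
#include <stdint.h>
#include <string.h>

#define COPY_ARRAY_N 1024
#define COPY_THRESHOLD (COPY_ARRAY_N - 40)

typedef struct
{
  void *dst;
  const void *src;
  uint32_t len;
} copy_entry_t;

static void
flush_copies (copy_entry_t * cpy, uint32_t n)
{
  for (uint32_t i = 0; i < n; i++)
    memcpy (cpy[i].dst, cpy[i].src, cpy[i].len);
}

/* In the hot loop: append an entry, flush when the array is nearly full. */
static uint32_t
add_copy (copy_entry_t * cpy, uint32_t n, void *dst, const void *src,
	  uint32_t len)
{
  cpy[n].dst = dst;
  cpy[n].src = src;
  cpy[n].len = len;
  if (++n >= COPY_THRESHOLD)
    {
      flush_copies (cpy, n);
      n = 0;
    }
  return n;
}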
f64 timeout = 3153600000.0;
uword event_type, *event_data = 0;

f64 now, poll_time_remaining;

poll_time_remaining =

timeout = poll_time_remaining;

next_timeout = timeout;
vec_foreach (queue, vui->rx_queues)
{
  vhost_user_vring_t *rxvq =
    &vui->vrings[VHOST_VRING_IDX_RX (*queue)];
  vhost_user_vring_t *txvq =
    &vui->vrings[VHOST_VRING_IDX_TX (*queue)];

  if (txvq->n_since_last_int)
    {
      if (now >= txvq->int_deadline)
	vhost_user_send_call (vm, txvq);
      else
	next_timeout = txvq->int_deadline - now;
    }

  if (rxvq->n_since_last_int)
    {
      if (now >= rxvq->int_deadline)
	vhost_user_send_call (vm, rxvq);
      else
	next_timeout = rxvq->int_deadline - now;
    }
}
if ((next_timeout < timeout) && (next_timeout > 0.0))
  timeout = next_timeout;

clib_warning ("BUG: unhandled event type %d", event_type);

timeout = 3153600000.0;
/* VLIB_REGISTER_NODE (vhost_user_send_interrupt_node) */
.name = "vhost-user-send-interrupt-process",

/* vhost_user_interface_rx_mode_change() */
txvq->used->flags = 0;

clib_warning ("BUG: unhandled mode %d changed for if %d queue %d", mode,

/* VNET_DEVICE_CLASS (vhost_user_dev_class) */
.name = "vhost-user",
/* vhost_user_process(): connect / reconnect the client socket */
struct sockaddr_un sun;

f64 timeout = 3153600000.0;
uword *event_data = 0;

sun.sun_family = AF_UNIX;

if (vui->unix_server_index == ~0)
  {
    if (vui->unix_file_index == ~0)

((sockfd = socket (AF_UNIX, SOCK_STREAM, 0)) < 0))

if (!vui->sock_errno || (vui->sock_errno != errno))

clib_warning ("Error: Could not open unix socket for %s",
	      vui->sock_filename);
vui->sock_errno = errno;

strncpy (sun.sun_path, (char *) vui->sock_filename,
	 sizeof (sun.sun_path) - 1);

if (fcntl (sockfd, F_SETFL, O_NONBLOCK) < 0)
  clib_unix_warning ("fcntl");

if (connect (sockfd, (struct sockaddr *) &sun,
	     sizeof (struct sockaddr_un)) == 0)

if (fcntl (sockfd, F_SETFL, 0) < 0)
  clib_unix_warning ("fcntl2");

vui->sock_errno = 0;
template.file_descriptor = sockfd;
template.private_data =
  vui - vhost_user_main.vhost_user_interfaces;
vui->unix_file_index = unix_file_add (&unix_main, &template);

vui->sock_errno = errno;

socklen_t len = sizeof (error);
int fd = UNIX_GET_FD (vui->unix_file_index);

getsockopt (fd, SOL_SOCKET, SO_ERROR, &error, &len);

DBG_SOCK ("getsockopt returned %d", retval);
vhost_user_if_disconnect (vui);
/* VLIB_REGISTER_NODE (vhost_user_process_node) */
.function = vhost_user_process,
.name = "vhost-user-process",

/* vhost_user_delete_if() */
return VNET_API_ERROR_INVALID_SW_IF_INDEX;

DBG_SOCK ("Deleting vhost-user interface %s (instance %d)",

vhost_user_delete_if (vnm, vm, vui->sw_if_index);
struct sockaddr_un un = { };

if ((fd = socket (AF_UNIX, SOCK_STREAM, 0)) < 0)
  return VNET_API_ERROR_SYSCALL_ERROR_1;

un.sun_family = AF_UNIX;
strncpy ((char *) un.sun_path, (char *) sock_filename,
	 sizeof (un.sun_path) - 1);

unlink ((char *) sock_filename);

if (bind (fd, (struct sockaddr *) &un, sizeof (un)) == -1)
  rv = VNET_API_ERROR_SYSCALL_ERROR_2;

if (listen (fd, 1) == -1)
  rv = VNET_API_ERROR_SYSCALL_ERROR_3;
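/*
 * Hedged sketch (standalone illustration): the minimal lifecycle of the
 * listening unix-domain socket created above: socket(), bind() to the
 * filesystem path (after unlinking any stale socket file), then listen().
 * create_server_socket() is a hypothetical helper name.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

static int
create_server_socket (const char *path)
{
  struct sockaddr_un un = { 0 };
  int fd = socket (AF_UNIX, SOCK_STREAM, 0);
  if (fd < 0)
    return -1;

  un.sun_family = AF_UNIX;
  strncpy (un.sun_path, path, sizeof (un.sun_path) - 1);
  unlink (path);		/* remove a stale socket file, if any */

  if (bind (fd, (struct sockaddr *) &un, sizeof (un)) == -1 ||
      listen (fd, 1) == -1)
    {
      close (fd);
      return -1;
    }
  return fd;
}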
/* vhost_user_create_ethernet() */
vhost_user_dev_class.index,

/* vhost_user_vui_init() */
		     const char *sock_filename,
		     u64 feature_mask, u32 * sw_if_index)

if (server_sock_fd != -1)

template.file_descriptor = server_sock_fd;

/* vhost_user_create_if() */
		     const char *sock_filename,
		     u8 renumber, u32 custom_dev_instance, u8 * hwaddr)

int server_sock_fd = -1;

if (sock_filename == NULL || !(strlen (sock_filename) > 0))
  return VNET_API_ERROR_INVALID_ARGUMENT;

return VNET_API_ERROR_IF_ALREADY_EXISTS;

  feature_mask, &sw_if_idx);

*sw_if_index = sw_if_idx;

/* vhost_user_modify_if() */
		     const char *sock_filename,
		     u64 feature_mask, u8 renumber, u32 custom_dev_instance)

int server_sock_fd = -1;

return VNET_API_ERROR_INVALID_SW_IF_INDEX;

if (sock_filename == NULL || !(strlen (sock_filename) > 0))
  return VNET_API_ERROR_INVALID_ARGUMENT;

if (if_index && (*if_index != vui->if_index))
  return VNET_API_ERROR_IF_ALREADY_EXISTS;

  &server_sock_fd)) != 0)

  sock_filename, feature_mask, &sw_if_idx);
u8 *sock_filename = NULL;

u64 feature_mask = (u64) ~ (0ULL);

u32 custom_dev_instance = ~0;

if (unformat (line_input, "socket %s", &sock_filename))

else if (unformat (line_input, "server"))

else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))

else if (unformat (line_input, "renumber %d", &custom_dev_instance))

  is_server, &sw_if_index, feature_mask,
  renumber, custom_dev_instance, hw)))
u32 sw_if_index = ~0;

if (unformat (line_input, "sw_if_index %d", &sw_if_index))
u32 *hw_if_indices = 0;

for (i = 0; i < vec_len (hw_if_indices); i++)

strncpy ((char *) vuid->if_name, (char *) s,

*out_vuids = r_vuids;
u32 hw_if_index, *hw_if_indices = 0;

struct feat_struct *feat_entry;

static struct feat_struct feat_array[] = {
#define _(s,b) { .str = #s, .bit = b, },

#define foreach_protocol_feature \
  _(VHOST_USER_PROTOCOL_F_MQ) \
  _(VHOST_USER_PROTOCOL_F_LOG_SHMFD)

static struct feat_struct proto_feat_array[] = {
#define _(s) { .str = #s, .bit = s},

vec_add1 (hw_if_indices, hw_if_index);

if (vec_len (hw_if_indices) == 0)

vlib_cli_output (vm, " number of rx virtqueues in interrupt mode: %d",

for (i = 0; i < vec_len (hw_if_indices); i++)

	hi->name, hw_if_indices[i]);

" features mask (0x%llx): \n"
" features (0x%llx): \n",

feat_entry = (struct feat_struct *) &feat_array;
while (feat_entry->str)

if (vui->features & (1ULL << feat_entry->bit))

feat_entry = (struct feat_struct *) &proto_feat_array;
while (feat_entry->str)

" region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr\n");
" ====== ===== ================== ================== ================== ================== ==================\n");

for (j = 0; j < vui->nregions; j++)

" %d %-5d 0x%016lx 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",

vui->regions[j].guest_phys_addr,

vui->regions[j].userspace_addr,

(q & 1) ? "RX" : "TX",

" qsz %d last_avail_idx %d last_used_idx %d\n",

" avail.flags %x avail.idx %d used.flags %x used.idx %d\n",

" id addr len flags next user_addr\n");
" ===== ================== ===== ====== ===== ==================\n");

for (j = 0; j < vui->vrings[q].qsz; j++)

" %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
/* VLIB_CLI_COMMAND: create vhost-user */
.path = "create vhost-user",
.short_help = "create vhost-user socket <socket-filename> [server] "
	      "[feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>] ",

/* VLIB_CLI_COMMAND: delete vhost-user */
.path = "delete vhost-user",
.short_help = "delete vhost-user {<interface> | sw_if_index <sw_idx>}",

/* VLIB_CLI_COMMAND: show vhost-user */
.path = "show vhost-user",
.short_help = "show vhost-user [<interface> [<interface> [..]]] [descriptors]",

else if (unformat (line_input, "off"))

/* VLIB_CLI_COMMAND: debug vhost-user */
.path = "debug vhost-user",
.short_help = "debug vhost-user <on | off>",

else if (unformat (input, "dont-dump-memory"))
unformat_function_t unformat_vnet_hw_interface
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
static clib_error_t * vhost_user_init(vlib_main_t *vm)
static void vhost_user_vring_close(vhost_user_intf_t *vui, u32 qid)
static void vnet_device_increment_rx_packets(u32 thread_index, u64 count)
static void vhost_user_if_disconnect(vhost_user_intf_t *vui)
#define vec_foreach_index(var, v)
Iterate over vector indices.
#define VRING_AVAIL_F_NO_INTERRUPT
u32 virtio_ring_flags
Runtime queue flags.
virtio_net_hdr_mrg_rxbuf_t hdr
The virtio-net header of the packet.
static uword random_default_seed(void)
Default random seed (unix/linux user-mode)
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread. Waits for an event, or for the indicated number of seconds...
#define clib_smp_swap(addr, new)
unix_file_function_t * read_function
vhost_cpu_t * cpus
Per-CPU data for vhost-user.
static void vhost_user_create_ethernet(vnet_main_t *vnm, vlib_main_t *vm, vhost_user_intf_t *vui, u8 *hwaddress)
Create ethernet interface for vhost user interface.
#define VHOST_USER_DOWN_DISCARD_COUNT
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
u8 runtime_data[0]
Function dependent node-runtime data.
void ethernet_delete_interface(vnet_main_t *vnm, u32 hw_if_index)
#define VHOST_VRING_IDX_TX(qid)
vnet_main_t * vnet_get_main(void)
static vnet_hw_interface_t * vnet_get_sup_hw_interface(vnet_main_t *vnm, u32 sw_if_index)
static clib_error_t * vhost_user_socket_error(unix_file_t *uf)
void vhost_user_rx_trace(vhost_trace_t *t, vhost_user_intf_t *vui, u16 qid, vlib_buffer_t *b, vhost_user_vring_t *txvq)
u64 region_guest_addr_hi[VHOST_MEMORY_MAX_NREGIONS]
vnet_interface_main_t interface_main
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
uword mhash_unset(mhash_t *h, void *key, uword *old_value)
static f64 vlib_time_now(vlib_main_t *vm)
#define foreach_virtio_trace_flags
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
vhost_copy_t copy[VHOST_USER_COPY_ARRAY_N]
static void vhost_user_term_if(vhost_user_intf_t *vui)
Disables and resets the interface structure.
#define VLIB_BUFFER_PRE_DATA_SIZE
static uword vhost_user_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
#define VHOST_USER_EVENT_START_TIMER
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
int vnet_interface_name_renumber(u32 sw_if_index, u32 new_show_dev_instance)
struct _vlib_node_registration vlib_node_registration_t
static_always_inline u32 vhost_user_input_copy(vhost_user_intf_t *vui, vhost_copy_t *cpy, u16 copy_len, u32 *map_hint)
#define VHOST_USER_MSG_HDR_SZ
static clib_error_t * vhost_user_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
clib_error_t * show_vhost_user_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
unformat_function_t unformat_vnet_sw_interface
#define VNET_HW_INTERFACE_FLAG_LINK_UP
static char * vhost_user_input_func_error_strings[]
static char * vhost_user_tx_func_error_strings[]
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
format_function_t format_vnet_sw_if_index_name
vhost_trace_t * current_trace
static uword vlib_process_suspend_time_is_zero(f64 dt)
Returns TRUE if a process suspend time is less than 1us.
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
static int vhost_user_name_renumber(vnet_hw_interface_t *hi, u32 new_dev_instance)
static void vhost_user_vui_init(vnet_main_t *vnm, vhost_user_intf_t *vui, int server_sock_fd, const char *sock_filename, u64 feature_mask, u32 *sw_if_index)
static vnet_sw_interface_t * vnet_get_hw_sw_interface(vnet_main_t *vnm, u32 hw_if_index)
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
#define VHOST_VRING_F_LOG
vnet_hw_interface_rx_mode
VNET_DEVICE_CLASS(vhost_user_dev_class, static)
#define VLIB_BUFFER_NEXT_PRESENT
static u8 * format_vhost_user_interface_name(u8 *s, va_list *args)
#define static_always_inline
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index. The first 64 bytes of buffer contains most header informatio...
#define VLIB_INIT_FUNCTION(x)
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
vlib_combined_counter_main_t * combined_sw_if_counters
static_always_inline void vnet_device_input_set_interrupt_pending(vnet_main_t *vnm, u32 hw_if_index, u16 queue_id)
#define VLIB_BUFFER_TOTAL_LENGTH_VALID
#define foreach_protocol_feature
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define clib_error_return(e, args...)
vhost_user_tx_func_error_t
static void unmap_all_mem_regions(vhost_user_intf_t *vui)
static void vhost_user_set_interrupt_pending(vhost_user_intf_t *vui, u32 ifq)
vhost_user_input_func_error_t
#define vlib_call_init_function(vm, x)
#define VHOST_USER_TX_COPY_THRESHOLD
static clib_error_t * vhost_user_socket_read(unix_file_t *uf)
static uword pointer_to_uword(const void *p)
#define UNIX_GET_FD(unixfd_idx)
static int vhost_user_init_server_sock(const char *sock_filename, int *sock_fd)
Open server unix socket on specified sock_filename.
static uword vhost_user_send_interrupt_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
VLIB_DEVICE_TX_FUNCTION_MULTIARCH(vhost_user_dev_class, vhost_user_tx)
#define VHOST_USER_EVENT_STOP_TIMER
static uword unix_file_add(unix_main_t *um, unix_file_t *template)
static void vhost_user_vring_unlock(vhost_user_intf_t *vui, u32 qid)
Unlock the vring lock.
format_function_t format_vnet_sw_interface_name
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static_always_inline uword vnet_get_device_input_thread_index(vnet_main_t *vnm, u32 hw_if_index, u16 queue_id)
u16 state
Input node state.
u16 current_length
Nbytes between current data and the end of this buffer.
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
int vhost_user_delete_if(vnet_main_t *vnm, vlib_main_t *vm, u32 sw_if_index)
static void * map_user_mem(vhost_user_intf_t *vui, uword addr)
u32 random
Pseudo random iterator.
static_always_inline void vhost_user_log_dirty_pages(vhost_user_intf_t *vui, u64 addr, u64 len)
uword mhash_set_mem(mhash_t *h, void *key, uword *new_value, uword *old_value)
#define VIRTQ_DESC_F_INDIRECT
#define clib_error_return_unix(e, args...)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
int vnet_hw_interface_get_rx_mode(vnet_main_t *vnm, u32 hw_if_index, u16 queue_id, vnet_hw_interface_rx_mode *mode)
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX
#define pool_put(P, E)
Free an object E in pool P.
format_function_t format_vnet_hw_interface_rx_mode
void vhost_user_tx_trace(vhost_trace_t *t, vhost_user_intf_t *vui, u16 qid, vlib_buffer_t *b, vhost_user_vring_t *rxvq)
#define VLIB_CONFIG_FUNCTION(x, n,...)
#define vhost_user_log_dirty_ring(vui, vq, member)
static vlib_node_registration_t vhost_user_process_node
(constructor) VLIB_REGISTER_NODE (vhost_user_process_node)
void vhost_user_unmap_all(void)
vlib_simple_counter_main_t * sw_if_counters
u32 region_mmap_fd[VHOST_MEMORY_MAX_NREGIONS]
static void vhost_user_send_call(vlib_main_t *vm, vhost_user_vring_t *vq)
#define VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE
u32 node_index
Node index.
vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS]
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
int vhost_user_dump_ifs(vnet_main_t *vnm, vlib_main_t *vm, vhost_user_intf_details_t **out_vuids)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static clib_error_t * vhost_user_exit(vlib_main_t *vm)
static void vhost_user_tx_thread_placement(vhost_user_intf_t *vui)
static void vhost_user_vring_init(vhost_user_intf_t *vui, u32 qid)
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
static vlib_node_registration_t vhost_user_send_interrupt_node
(constructor) VLIB_REGISTER_NODE (vhost_user_send_interrupt_node)
u32 * show_dev_instance_by_real_dev_instance
int vhost_user_create_if(vnet_main_t *vnm, vlib_main_t *vm, const char *sock_filename, u8 is_server, u32 *sw_if_index, u64 feature_mask, u8 renumber, u32 custom_dev_instance, u8 *hwaddr)
u16 device_index
The device index.
vhost_user_intf_t * vhost_user_interfaces
static void mhash_init_c_string(mhash_t *h, uword n_value_bytes)
static_always_inline uword vlib_get_thread_index(void)
static clib_error_t * vhost_user_kickfd_read_ready(unix_file_t *uf)
#define CLIB_PREFETCH(addr, size, type)
static_always_inline void vhost_user_log_dirty_pages_2(vhost_user_intf_t *vui, u64 addr, u64 len, u8 is_host_address)
#define vec_free(V)
Free vector's memory (no header).
static int vhost_user_vring_try_lock(vhost_user_intf_t *vui, u32 qid)
Try once to lock the vring.
#define VLIB_MAIN_LOOP_EXIT_FUNCTION(x)
int vhost_user_modify_if(vnet_main_t *vnm, vlib_main_t *vm, const char *sock_filename, u8 is_server, u32 sw_if_index, u64 feature_mask, u8 renumber, u32 custom_dev_instance)
#define clib_warning(format, args...)
#define VLIB_BUFFER_IS_TRACED
#define clib_memcpy(a, b, c)
#define VHOST_MEMORY_MAX_NREGIONS
static_always_inline void * map_guest_mem(vhost_user_intf_t *vui, uword addr, u32 *hint)
clib_error_t * debug_vhost_user_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
static u32 vlib_buffer_alloc_from_free_list(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u32 free_list_index)
Allocate buffers from specific freelist into supplied array.
void vlib_worker_thread_barrier_sync(vlib_main_t *vm)
static uword vhost_user_tx(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
u16 first_desc_len
Length of the first data descriptor.
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD
#define VLIB_BUFFER_DATA_SIZE
static void vhost_user_input_rewind_buffers(vlib_main_t *vm, vhost_cpu_t *cpu, vlib_buffer_t *b_head)
#define VLIB_CLI_COMMAND(x,...)
#define VNET_SW_INTERFACE_FLAG_ADMIN_UP
u32 max_l3_packet_bytes[VLIB_N_RX_TX]
u32 rx_buffers[VHOST_USER_RX_BUFFERS_N]
uword unformat_ethernet_address(unformat_input_t *input, va_list *args)
#define VHOST_USER_RX_BUFFER_STARVATION
static uword * mhash_get(mhash_t *h, const void *key)
static long get_huge_page_size(int fd)
u32 next_buffer
Next buffer for this linked-list of buffers.
clib_error_t * vhost_user_delete_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
static void clib_mem_free(void *p)
#define VIRTQ_DESC_F_NEXT
volatile u32 * vring_locks[VHOST_VRING_MAX_N]
clib_error_t * ethernet_register_interface(vnet_main_t *vnm, u32 dev_class_index, u32 dev_instance, u8 *address, u32 *hw_if_index_return, ethernet_flag_change_function_t flag_change)
mhash_t if_index_by_sock_name
#define clib_error_report(e)
static void * vlib_frame_args(vlib_frame_t *f)
Get pointer to frame scalar data.
static u8 * format_vhost_trace(u8 *s, va_list *va)
#define VHOST_USER_RX_COPY_THRESHOLD
static void vhost_user_vring_lock(vhost_user_intf_t *vui, u32 qid)
Spin until the vring is successfully locked.
static vlib_main_t * vlib_get_main(void)
static void vhost_user_rx_thread_placement()
Unassign existing interface/queue to thread mappings and re-assign new interface/queue to thread mapp...
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
static void vhost_user_update_iface_state(vhost_user_intf_t *vui)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
#define foreach_vhost_user_tx_func_error
void * region_mmap_addr[VHOST_MEMORY_MAX_NREGIONS]
static clib_error_t * vhost_user_socksvr_accept_ready(unix_file_t *uf)
static u32 vhost_user_if_input(vlib_main_t *vm, vhost_user_main_t *vum, vhost_user_intf_t *vui, u16 qid, vlib_node_runtime_t *node, vnet_hw_interface_rx_mode mode)
static clib_error_t * vhost_user_config(vlib_main_t *vm, unformat_input_t *input)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
#define VRING_USED_F_NO_NOTIFY
#define VHOST_USER_RX_BUFFERS_N
int vhost_user_intf_ready(vhost_user_intf_t *vui)
Returns whether at least one TX and one RX vring are enabled.
vhost_user_vring_t vrings[VHOST_VRING_MAX_N]
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
#define VHOST_VRING_MAX_N
#define VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE
#define clib_unix_warning(format, args...)
vlib_node_registration_t vhost_user_input_node
(constructor) VLIB_REGISTER_NODE (vhost_user_input_node)
#define DBG_SOCK(args...)
u32 vhost_user_rx_discard_packet(vlib_main_t *vm, vhost_user_intf_t *vui, vhost_user_vring_t *txvq, u32 discard_max)
Try to discard packets from the tx ring (VPP RX path).
static vhost_user_main_t vhost_user_main
static void * clib_mem_alloc_aligned(uword size, uword align)
#define VLIB_NODE_FUNCTION_MULTIARCH(node, fn)
static u32 random_u32(u32 *seed)
32-bit random number generator
void vlib_worker_thread_barrier_release(vlib_main_t *vm)
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
#define VLIB_REGISTER_NODE(x,...)
static vlib_thread_main_t * vlib_get_thread_main()
#define VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE
u64 region_guest_addr_lo[VHOST_MEMORY_MAX_NREGIONS]
static clib_error_t * vhost_user_callfd_read_ready(unix_file_t *uf)
#define vec_foreach(var, vec)
Vector iterator.
#define foreach_vhost_user_input_func_error
u16 flags
Copy of main node flags.
#define CLIB_MEMORY_BARRIER()
virtio_net_hdr_mrg_rxbuf_t tx_headers[VLIB_FRAME_SIZE]
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
#define vec_validate_init_empty(V, I, INIT)
Make sure vector is long enough for given index and initialize empty space (no header, unspecified alignment)
#define CLIB_CACHE_LINE_BYTES
u32 flags
buffer flags: VLIB_BUFFER_IS_TRACED: trace this buffer.
static_always_inline u32 vhost_user_tx_copy(vhost_user_intf_t *vui, vhost_copy_t *cpy, u16 copy_len, u32 *map_hint)
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
static clib_error_t * vhost_user_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_interface_rx_mode mode)
#define VHOST_USER_PROTOCOL_F_MQ
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
clib_error_t * vhost_user_connect_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
VNET_HW_INTERFACE_CLASS(vhost_interface_class, static)
int dont_dump_vhost_user_memory
#define VHOST_VRING_IDX_RX(qid)
static void unix_file_del(unix_main_t *um, unix_file_t *f)