/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "hw/virtio/vhost.h"

/* Todo: need to add the multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_GUEST_ANNOUNCE,
    VIRTIO_NET_F_STATUS,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

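/**
 * Check the device-specific features against the set that SVQ knows how to
 * shadow; transport feature bits are validated separately by
 * vhost_svq_valid_features().
 */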
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport feature bits are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

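/** Check that the opened vdpa backend really is a network device. */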
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err;
    }
    return 0;

err:
    if (net) {
        vhost_net_cleanup(net);
        g_free(net);
    }
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev = &s->vhost_net->dev;

    qemu_vfree(s->cvq_cmd_out_buffer);
    qemu_vfree(s->status);
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

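/**
 * Get the vring group a virtqueue belongs to.
 *
 * Returns the group number, or a negative value on ioctl failure.
 */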
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        error_report("Cannot get VQ %u group: %s", vq_index,
                     g_strerror(errno));
        return r;
    }

    return state.num;
}

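/** Set the address space id a vring group uses for its IOTLB mappings. */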
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

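/** Unmap a CVQ shadow buffer from the device and drop its IOVA allocation. */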
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * In buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

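/**
 * Start the control virtqueue net client: if the CVQ sits alone in its own
 * virtqueue group, move that group to a separate address space id so only
 * the CVQ is shadowed, then map the shadow command buffers into it.
 */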
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s;
    struct vhost_vdpa *v;
    uint64_t backend_features;
    int64_t cvq_group;
    int cvq_index, r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    v->shadow_data = s->always_svq;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->always_svq) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we early return in these cases SVQ will not be enabled. Migration
     * will be blocked as long as vhost-vdpa backends do not offer _F_LOG.
     *
     * Calling VHOST_GET_BACKEND_FEATURES as they are not available in v->dev
     * yet.
     */
    r = ioctl(v->device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_report("Cannot get vdpa backend_features: %s(%d)",
                     g_strerror(errno), errno);
        return -1;
    }
    if (!(backend_features & VHOST_BACKEND_F_IOTLB_ASID) ||
        !vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    /*
     * Check if all the virtqueues of the virtio device are in a different vq
     * group than the last one, which hosts the CVQ. The CVQ group is passed
     * in cvq_group.
     */
    cvq_index = v->dev->vq_index_end - 1;
    cvq_group = vhost_vdpa_get_vring_group(v->device_fd, cvq_index);
    if (unlikely(cvq_group < 0)) {
        return cvq_group;
    }
    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(v->device_fd, i);

        if (unlikely(group < 0)) {
            return group;
        }

        if (group == cvq_group) {
            return 0;
        }
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                       v->iova_range.last);
    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
        if (!s->always_svq) {
            /*
             * If only the CVQ is shadowed we can delete this safely.
             * If all the VQs are shadowed this will be needed by the time the
             * device is started again to register SVQ vrings and similar.
             */
            g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
        }
    }
}

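/**
 * Push a control command to the device through SVQ and poll for its
 * completion. The caller must hold the BQL.
 *
 * Returns the length written by the device into the in buffer, or a negative
 * error code.
 */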
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
     * when BQL is released.
     */
    return vhost_svq_poll(svq);
}

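/** Compose a control command into the shadow out buffer and send it. */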
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}

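/** Reload the MAC address into the device if _F_CTRL_MAC_ADDR was negotiated. */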
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    uint64_t features = n->parent_obj.guest_features;
    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  n->mac, sizeof(n->mac));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}

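/** Reload the number of active queue pairs if _F_MQ was negotiated. */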
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    uint64_t features = n->parent_obj.guest_features;
    ssize_t dev_written;

    if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
                                          sizeof(mq));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

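/**
 * Replay the device model state through the CVQ at device start, so the
 * vdpa device state matches what the guest expects (e.g. after migration).
 */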
static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* In buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
    if (unlikely(dev_written < 0)) {
        goto out;
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        return VIRTIO_NET_ERR;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

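/**
 * Create a vhost-vdpa net client, either a datapath queue pair or the
 * control virtqueue client, and plug it into the vhost_net backend.
 */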
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           VhostIOVATree *iova_tree)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    s->vhost_vdpa.iova_tree = iova_tree;
    if (!is_datapath) {
        s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
                                              vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
        s->status = qemu_memalign(qemu_real_host_page_size(),
                                  vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len());

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }

    return nc;
}

static int vhost_vdpa_get_iova_range(int fd,
                                     struct vhost_vdpa_iova_range *iova_range)
{
    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);

    return ret < 0 ? -errno : 0;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

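/**
 * Entry point for the vhost-vdpa netdev backend, e.g. (hypothetical device
 * node):
 *   -netdev vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0
 *   -device virtio-net-pci,netdev=vdpa0
 *
 * One net client is created per datapath queue pair plus, if the device
 * offers a control virtqueue, one more for the CVQ.
 */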
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    g_autoptr(VhostIOVATree) iova_tree = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -1;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (opts->x_svq) {
        if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
            goto err_svq;
        }

        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, iova_tree);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, iova_tree);
        if (!nc) {
            goto err;
        }
    }

    /* iova_tree ownership belongs to last NetClientState */
    g_steal_pointer(&iova_tree);
    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

err_svq:
    qemu_close(vdpa_device_fd);
    return -1;
}