#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
+#include "qemu/log.h"
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "migration/qemu-file-types.h"
#include "sysemu/dma.h"
-#include "sysemu/tcg.h"
#include "trace.h"
/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1
#ifdef _VHOST_DEBUG
-#define VHOST_OPS_DEBUG(fmt, ...) \
- do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
- strerror(errno), errno); } while (0)
+#define VHOST_OPS_DEBUG(retval, fmt, ...) \
+ do { \
+ error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
+ strerror(-retval), -retval); \
+ } while (0)
#else
-#define VHOST_OPS_DEBUG(fmt, ...) \
+#define VHOST_OPS_DEBUG(retval, fmt, ...) \
do { } while (0)
#endif
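+/*
+ * Example of the reworked convention: callers now pass the negative
+ * errno value returned by the vhost op instead of relying on the
+ * global errno, e.g.:
+ *
+ *     r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
+ *     if (r < 0) {
+ *         VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
+ *     }
+ */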
}
}
+static bool vhost_dev_has_iommu(struct vhost_dev *dev)
+{
+ VirtIODevice *vdev = dev->vdev;
+
+    /*
+     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports an
+     * incremental memory mapping API via the IOTLB API. On a platform
+     * that does not have an IOMMU, there is no need to enable this
+     * feature, which may cause unnecessary IOTLB miss/update
+     * transactions.
+     */
+ if (vdev) {
+ return virtio_bus_device_iommu_enabled(vdev) &&
+ virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+ } else {
+ return false;
+ }
+}
+
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
MemoryRegionSection *section,
hwaddr first,
continue;
}
- vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
- range_get_last(vq->used_phys, vq->used_size));
+ if (vhost_dev_has_iommu(dev)) {
+ IOMMUTLBEntry iotlb;
+ hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
+ hwaddr phys, s, offset;
+
+ while (used_size) {
+ rcu_read_lock();
+ iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
+ used_phys,
+ true,
+ MEMTXATTRS_UNSPECIFIED);
+ rcu_read_unlock();
+
+ if (!iotlb.target_as) {
+ qemu_log_mask(LOG_GUEST_ERROR, "translation "
+ "failure for used_iova %"PRIx64"\n",
+ used_phys);
+ return -EINVAL;
+ }
+
+ offset = used_phys & iotlb.addr_mask;
+ phys = iotlb.translated_addr + offset;
+
+ /*
+ * Distance from start of used ring until last byte of
+ * IOMMU page.
+ */
+ s = iotlb.addr_mask - offset;
+            /*
+             * Size of the used ring, or of the part of it up to the
+             * end of the IOMMU page. To avoid a zero result, do the
+             * addition outside of MIN().
+             */
+ s = MIN(s, used_size - 1) + 1;
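+            /*
+             * Worked example (hypothetical values): with 4 KiB IOMMU
+             * pages, addr_mask is 0xfff; used_phys = 0x1234 yields
+             * offset = 0x234 and s = 0xfff - 0x234 = 0xdcb, so the
+             * chunk handled in this iteration is
+             * MIN(0xdcb, used_size - 1) + 1 = 0xdcc bytes when the
+             * ring is large, i.e. exactly up to the page boundary.
+             */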
+
+ vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
+ range_get_last(phys, s));
+ used_size -= s;
+ used_phys += s;
+ }
+ } else {
+ vhost_dev_sync_region(dev, section, start_addr,
+ end_addr, vq->used_phys,
+ range_get_last(vq->used_phys, vq->used_size));
+ }
}
return 0;
}
return log_size;
}
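+/*
+ * Select the ops table matching the requested backend; only backends
+ * compiled into this binary (CONFIG_VHOST_*) are available.
+ */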
+static int vhost_set_backend_type(struct vhost_dev *dev,
+ VhostBackendType backend_type)
+{
+ int r = 0;
+
+ switch (backend_type) {
+#ifdef CONFIG_VHOST_KERNEL
+ case VHOST_BACKEND_TYPE_KERNEL:
+ dev->vhost_ops = &kernel_ops;
+ break;
+#endif
+#ifdef CONFIG_VHOST_USER
+ case VHOST_BACKEND_TYPE_USER:
+ dev->vhost_ops = &user_ops;
+ break;
+#endif
+#ifdef CONFIG_VHOST_VDPA
+ case VHOST_BACKEND_TYPE_VDPA:
+ dev->vhost_ops = &vdpa_ops;
+ break;
+#endif
+ default:
+ error_report("Unknown vhost backend type");
+ r = -1;
+ }
+
+ return r;
+}
+
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
Error *err = NULL;
releasing the current log, to ensure no logging is lost */
r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_log_base failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
}
vhost_log_put(dev, true);
dev->log_size = size;
}
-static int vhost_dev_has_iommu(struct vhost_dev *dev)
-{
- VirtIODevice *vdev = dev->vdev;
-
- /*
- * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
- * incremental memory mapping API via IOTLB API. For platform that
- * does not have IOMMU, there's no need to enable this feature
- * which may cause unnecessary IOTLB miss/update trnasactions.
- */
- return vdev->dma_as != &address_space_memory &&
- virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
-}
-
static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
hwaddr *plen, bool is_write)
{
if (!dev->log_enabled) {
r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_mem_table failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
}
goto out;
}
}
r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_mem_table failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
}
/* To log less, can only decrease log size after table update. */
if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
if (dev->vhost_ops->vhost_vq_get_addr) {
r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_vq_get_addr failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed");
+ return r;
}
} else {
addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed");
}
- return 0;
+ return r;
}
static int vhost_dev_set_features(struct vhost_dev *dev,
}
r = dev->vhost_ops->vhost_set_features(dev, features);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_features failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_features failed");
goto out;
}
if (dev->vhost_ops->vhost_set_backend_cap) {
r = dev->vhost_ops->vhost_set_backend_cap(dev);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_backend_cap failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed");
goto out;
}
}
out:
- return r < 0 ? -errno : 0;
+ return r;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
err_vq:
for (; i >= 0; --i) {
idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
+ addr = virtio_queue_get_desc_addr(dev->vdev, idx);
+ if (!addr) {
+ continue;
+ }
vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
dev->log_enabled);
}
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
return false;
}
-#ifdef HOST_WORDS_BIGENDIAN
+#if HOST_BIG_ENDIAN
return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
bool is_big_endian,
int vhost_vq_index)
{
+ int r;
struct vhost_vring_state s = {
.index = vhost_vq_index,
.num = is_big_endian
};
- if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
- return 0;
- }
-
- VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
- if (errno == ENOTTY) {
- error_report("vhost does not support cross-endian");
- return -ENOSYS;
+ r = dev->vhost_ops->vhost_set_vring_endian(dev, &s);
+ if (r < 0) {
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed");
}
-
- return -errno;
+ return r;
}
static int vhost_memory_region_lookup(struct vhost_dev *hdev,
return ret;
}
-static int vhost_virtqueue_start(struct vhost_dev *dev,
- struct VirtIODevice *vdev,
- struct vhost_virtqueue *vq,
- unsigned idx)
+int vhost_virtqueue_start(struct vhost_dev *dev,
+ struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq,
+ unsigned idx)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
vq->num = state.num = virtio_queue_get_num(vdev, idx);
r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_num failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed");
+ return r;
}
state.num = virtio_queue_get_last_avail_idx(vdev, idx);
r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_base failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed");
+ return r;
}
if (vhost_needs_vring_endian(vdev)) {
virtio_is_big_endian(vdev),
vhost_vq_index);
if (r) {
- return -errno;
+ return r;
}
}
r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
if (r < 0) {
- r = -errno;
goto fail_alloc;
}
file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
goto fail_kick;
}
return r;
}
-static void vhost_virtqueue_stop(struct vhost_dev *dev,
- struct VirtIODevice *vdev,
- struct vhost_virtqueue *vq,
- unsigned idx)
+void vhost_virtqueue_stop(struct vhost_dev *dev,
+ struct VirtIODevice *vdev,
+ struct vhost_virtqueue *vq,
+ unsigned idx)
{
int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
struct vhost_vring_state state = {
r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r);
+ VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
/* Connection to the backend is broken, so let's sync internal
* last avail idx to the device used idx.
*/
r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed");
return r;
}
return 0;
}
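+/*
+ * Called from the main loop when the backend signals the per-virtqueue
+ * error eventfd set up via vhost_set_vring_err below.
+ */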
+static void vhost_virtqueue_error_notifier(EventNotifier *n)
+{
+ struct vhost_virtqueue *vq = container_of(n, struct vhost_virtqueue,
+ error_notifier);
+ struct vhost_dev *dev = vq->dev;
+ int index = vq - dev->vqs;
+
+ if (event_notifier_test_and_clear(n) && dev->vdev) {
+ VHOST_OPS_DEBUG(-EINVAL, "vhost vring error in virtqueue %d",
+ dev->vq_index + index);
+ }
+}
+
static int vhost_virtqueue_init(struct vhost_dev *dev,
struct vhost_virtqueue *vq, int n)
{
return r;
}
- file.fd = event_notifier_get_fd(&vq->masked_notifier);
+ file.fd = event_notifier_get_wfd(&vq->masked_notifier);
r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_call failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
goto fail_call;
}
vq->dev = dev;
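+    /*
+     * If the backend can report ring errors, give it an eventfd to
+     * signal and watch that fd from the main loop.
+     */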
+ if (dev->vhost_ops->vhost_set_vring_err) {
+ r = event_notifier_init(&vq->error_notifier, 0);
+ if (r < 0) {
+ goto fail_call;
+ }
+
+ file.fd = event_notifier_get_fd(&vq->error_notifier);
+ r = dev->vhost_ops->vhost_set_vring_err(dev, &file);
+ if (r) {
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_err failed");
+ goto fail_err;
+ }
+
+ event_notifier_set_handler(&vq->error_notifier,
+ vhost_virtqueue_error_notifier);
+ }
+
return 0;
+
+fail_err:
+ event_notifier_cleanup(&vq->error_notifier);
fail_call:
event_notifier_cleanup(&vq->masked_notifier);
return r;
static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
event_notifier_cleanup(&vq->masked_notifier);
+ if (vq->dev->vhost_ops->vhost_set_vring_err) {
+ event_notifier_set_handler(&vq->error_notifier, NULL);
+ event_notifier_cleanup(&vq->error_notifier);
+ }
}
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
- VhostBackendType backend_type, uint32_t busyloop_timeout)
+ VhostBackendType backend_type, uint32_t busyloop_timeout,
+ Error **errp)
{
uint64_t features;
int i, r, n_initialized_vqs = 0;
- Error *local_err = NULL;
hdev->vdev = NULL;
hdev->migration_blocker = NULL;
r = vhost_set_backend_type(hdev, backend_type);
assert(r >= 0);
- r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
+ r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp);
if (r < 0) {
goto fail;
}
r = hdev->vhost_ops->vhost_set_owner(hdev);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_owner failed");
+ error_setg_errno(errp, -r, "vhost_set_owner failed");
goto fail;
}
r = hdev->vhost_ops->vhost_get_features(hdev, &features);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_get_features failed");
+ error_setg_errno(errp, -r, "vhost_get_features failed");
goto fail;
}
for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
if (r < 0) {
+ error_setg_errno(errp, -r, "Failed to initialize virtqueue %d", i);
goto fail;
}
}
r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
busyloop_timeout);
if (r < 0) {
+ error_setg_errno(errp, -r, "Failed to set busyloop timeout");
goto fail_busyloop;
}
}
hdev->features = features;
hdev->memory_listener = (MemoryListener) {
+ .name = "vhost",
.begin = vhost_begin,
.commit = vhost_commit,
.region_add = vhost_region_addnop,
};
hdev->iommu_listener = (MemoryListener) {
+ .name = "vhost-iommu",
.region_add = vhost_iommu_region_add,
.region_del = vhost_iommu_region_del,
};
}
if (hdev->migration_blocker != NULL) {
- r = migrate_add_blocker(hdev->migration_blocker, &local_err);
- if (local_err) {
- error_report_err(local_err);
+ r = migrate_add_blocker(hdev->migration_blocker, errp);
+ if (r < 0) {
error_free(hdev->migration_blocker);
goto fail_busyloop;
}
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
- error_report("vhost backend memory slots limit is less"
- " than current number of present memory slots");
- r = -1;
+ error_setg(errp, "vhost backend memory slots limit is less"
+ " than current number of present memory slots");
+ r = -EINVAL;
goto fail_busyloop;
}
{
int i;
+ trace_vhost_dev_cleanup(hdev);
+
for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_cleanup(hdev->vqs + i);
}
if (mask) {
assert(vdev->use_guest_notifier_mask);
- file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
+ file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier);
} else {
- file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
+ file.fd = event_notifier_get_wfd(virtio_queue_get_guest_notifier(vvq));
}
file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_vring_call failed");
+ error_report("vhost_set_vring_call failed %d", -r);
+ }
+}
+
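+/*
+ * Report (and clear) a config interrupt that was latched in the masked
+ * notifier while the guest had config interrupts masked.
+ */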
+bool vhost_config_pending(struct vhost_dev *hdev)
+{
+    EventNotifier *notifier =
+        &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
+
+    assert(hdev->vhost_ops);
+    if (!hdev->started ||
+        !hdev->vhost_ops->vhost_set_config_call) {
+        return false;
+    }
+
+    return event_notifier_test_and_clear(notifier);
+}
+
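+/*
+ * Route the backend's config-change fd either to the internal masked
+ * notifier (mask == true) or to the device's config_notifier, which
+ * delivers the interrupt to the guest.
+ */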
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask)
+{
+ int fd;
+ int r;
+ EventNotifier *notifier =
+ &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
+ EventNotifier *config_notifier = &vdev->config_notifier;
+ assert(hdev->vhost_ops);
+
+    if (!hdev->started ||
+        !hdev->vhost_ops->vhost_set_config_call) {
+        return;
+    }
+ if (mask) {
+ assert(vdev->use_guest_notifier_mask);
+ fd = event_notifier_get_fd(notifier);
+ } else {
+ fd = event_notifier_get_fd(config_notifier);
+ }
+ r = hdev->vhost_ops->vhost_set_config_call(hdev, fd);
+ if (r < 0) {
+ error_report("vhost_set_config_call failed %d", -r);
+ }
+}
+
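+/* Detach the config-change fd from the backend; -1 disables it. */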
+static void vhost_stop_config_intr(struct vhost_dev *dev)
+{
+ int fd = -1;
+ assert(dev->vhost_ops);
+ if (dev->vhost_ops->vhost_set_config_call) {
+ dev->vhost_ops->vhost_set_config_call(dev, fd);
+ }
+}
+
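+/*
+ * Re-attach the device's config notifier to the backend and kick it
+ * once, so a config change that raced with the switch is not lost.
+ */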
+static void vhost_start_config_intr(struct vhost_dev *dev)
+{
+    int r;
+    int fd;
+
+    assert(dev->vhost_ops);
+    fd = event_notifier_get_fd(&dev->vdev->config_notifier);
+ if (dev->vhost_ops->vhost_set_config_call) {
+ r = dev->vhost_ops->vhost_set_config_call(dev, fd);
+ if (!r) {
+ event_notifier_set(&dev->vdev->config_notifier);
+ }
}
}
}
int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
- uint32_t config_len)
+ uint32_t config_len, Error **errp)
{
assert(hdev->vhost_ops);
if (hdev->vhost_ops->vhost_get_config) {
- return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
+ return hdev->vhost_ops->vhost_get_config(hdev, config, config_len,
+ errp);
}
- return -1;
+ error_setg(errp, "vhost_get_config not implemented");
+ return -ENOSYS;
}
int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
size, flags);
}
- return -1;
+ return -ENOSYS;
}
void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
if (err) {
error_report_err(err);
- return -1;
+ return -ENOMEM;
}
vhost_dev_free_inflight(inflight);
}
if (inflight->size != size) {
- if (vhost_dev_resize_inflight(inflight, size)) {
- return -1;
+ int ret = vhost_dev_resize_inflight(inflight, size);
+ if (ret < 0) {
+ return ret;
}
}
inflight->queue_size = qemu_get_be16(f);
r = vhost_dev_set_features(hdev, hdev->log_enabled);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_dev_prepare_inflight failed");
+ VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed");
return r;
}
if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_inflight_fd failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed");
+ return r;
}
}
if (dev->vhost_ops->vhost_get_inflight_fd) {
r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
if (r) {
- VHOST_OPS_DEBUG("vhost_get_inflight_fd failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed");
+ return r;
}
}
return 0;
}
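+/*
+ * Enable/disable the rings at the backend where supported; backends
+ * without the callback are treated as always-enabled, so this returns
+ * success.
+ */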
+static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable)
+{
+ if (!hdev->vhost_ops->vhost_set_vring_enable) {
+ return 0;
+ }
+
+    /*
+     * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not
+     * been negotiated, the rings start directly in the enabled state, and
+     * the .vhost_set_vring_enable callback will fail since
+     * VHOST_USER_SET_VRING_ENABLE is not supported.
+     */
+ if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER &&
+ !virtio_has_feature(hdev->backend_features,
+ VHOST_USER_F_PROTOCOL_FEATURES)) {
+ return 0;
+ }
+
+ return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable);
+}
+
/* Host notifiers must be enabled at this point. */
-int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
+int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
int i, r;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
+ trace_vhost_dev_start(hdev, vdev->name, vrings);
+
+ vdev->vhost_started = true;
hdev->started = true;
hdev->vdev = vdev;
r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_mem_table failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
goto fail_mem;
}
for (i = 0; i < hdev->nvqs; ++i) {
}
}
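+    /*
+     * Prepare the notifier that latches config interrupts while the
+     * guest keeps them masked, starting from a cleared state.
+     */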
+ r = event_notifier_init(
+ &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0);
+ if (r < 0) {
+ return r;
+ }
+ event_notifier_test_and_clear(
+ &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
+ if (!vdev->use_guest_notifier_mask) {
+ vhost_config_mask(hdev, vdev, true);
+ }
if (hdev->log_enabled) {
uint64_t log_base;
hdev->log_size ? log_base : 0,
hdev->log);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_log_base failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
+ goto fail_log;
+ }
+ }
+ if (vrings) {
+ r = vhost_dev_set_vring_enable(hdev, true);
+ if (r) {
goto fail_log;
}
}
if (hdev->vhost_ops->vhost_dev_start) {
r = hdev->vhost_ops->vhost_dev_start(hdev, true);
if (r) {
- goto fail_log;
+ goto fail_start;
}
}
if (vhost_dev_has_iommu(hdev) &&
vhost_device_iotlb_miss(hdev, vq->used_phys, true);
}
}
+ vhost_start_config_intr(hdev);
return 0;
+fail_start:
+ if (vrings) {
+ vhost_dev_set_vring_enable(hdev, false);
+ }
fail_log:
vhost_log_put(hdev, false);
fail_vq:
fail_mem:
fail_features:
-
+ vdev->vhost_started = false;
hdev->started = false;
return r;
}
/* Host notifiers must be enabled at this point. */
-void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
+void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
int i;
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
+ event_notifier_test_and_clear(
+ &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
+ event_notifier_test_and_clear(&vdev->config_notifier);
+
+ trace_vhost_dev_stop(hdev, vdev->name, vrings);
if (hdev->vhost_ops->vhost_dev_start) {
hdev->vhost_ops->vhost_dev_start(hdev, false);
}
+ if (vrings) {
+ vhost_dev_set_vring_enable(hdev, false);
+ }
for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_stop(hdev,
vdev,
}
memory_listener_unregister(&hdev->iommu_listener);
}
+ vhost_stop_config_intr(hdev);
vhost_log_put(hdev, true);
hdev->started = false;
+ vdev->vhost_started = false;
hdev->vdev = NULL;
}
return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
}
- return -1;
+ return -ENOSYS;
}