#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
-#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
VRingMemoryRegionCaches *new;
hwaddr addr, size;
int event_size;
+ int64_t len;
event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
}
new = g_new0(VRingMemoryRegionCaches, 1);
size = virtio_queue_get_desc_size(vdev, n);
- address_space_cache_init(&new->desc, vdev->dma_as,
- addr, size, false);
+ len = address_space_cache_init(&new->desc, vdev->dma_as,
+ addr, size, false);
+ if (len < size) {
+ virtio_error(vdev, "Cannot map desc");
+ goto err_desc;
+ }
size = virtio_queue_get_used_size(vdev, n) + event_size;
- address_space_cache_init(&new->used, vdev->dma_as,
- vq->vring.used, size, true);
+ len = address_space_cache_init(&new->used, vdev->dma_as,
+ vq->vring.used, size, true);
+ if (len < size) {
+ virtio_error(vdev, "Cannot map used");
+ goto err_used;
+ }
size = virtio_queue_get_avail_size(vdev, n) + event_size;
- address_space_cache_init(&new->avail, vdev->dma_as,
- vq->vring.avail, size, false);
+ len = address_space_cache_init(&new->avail, vdev->dma_as,
+ vq->vring.avail, size, false);
+ if (len < size) {
+ virtio_error(vdev, "Cannot map avail");
+ goto err_avail;
+ }
atomic_rcu_set(&vq->vring.caches, new);
if (old) {
call_rcu(old, virtio_free_region_cache, rcu);
}
+ return;
+
+err_avail:
+ address_space_cache_destroy(&new->used);
+err_used:
+ address_space_cache_destroy(&new->desc);
+err_desc:
+ g_free(new);
}
/* virt queue functions */
{
VRing *vring = &vdev->vq[n].vring;
- if (!vring->desc) {
+ if (!vring->num || !vring->desc || !vring->align) {
/* not yet set up -> nothing to do */
return;
}
virtio_init_region_cache(vdev, n);
}
+/* Called within rcu_read_lock(). */
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
MemoryRegionCache *cache, int i)
{
virtio_tswap16s(vdev, &desc->next);
}
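+/* Fetch the vring's RCU-protected region caches. The returned pointer is
+ * only stable inside rcu_read_lock() and after virtio_init_region_cache()
+ * has run, hence the assertion. */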
+static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
+{
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ assert(caches != NULL);
+ return caches;
+}
+/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
- hwaddr pa;
- pa = vq->vring.avail + offsetof(VRingAvail, flags);
- return virtio_lduw_phys(vq->vdev, pa);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+ hwaddr pa = offsetof(VRingAvail, flags);
+ return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
+/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
- hwaddr pa;
- pa = vq->vring.avail + offsetof(VRingAvail, idx);
- vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+ hwaddr pa = offsetof(VRingAvail, idx);
+ vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
return vq->shadow_avail_idx;
}
+/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
- hwaddr pa;
- pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
- return virtio_lduw_phys(vq->vdev, pa);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+ hwaddr pa = offsetof(VRingAvail, ring[i]);
+ return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
+/* Called within rcu_read_lock(). */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
return vring_avail_ring(vq, vq->vring.num);
}
+/* Called within rcu_read_lock(). */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
int i)
{
- hwaddr pa;
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+ hwaddr pa = offsetof(VRingUsed, ring[i]);
virtio_tswap32s(vq->vdev, &uelem->id);
virtio_tswap32s(vq->vdev, &uelem->len);
- pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
- address_space_write(vq->vdev->dma_as, pa, MEMTXATTRS_UNSPECIFIED,
- (void *)uelem, sizeof(VRingUsedElem));
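+ /* Complete the cached write and mark the range dirty so the update is
+ * visible to migration's dirty tracking. */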
+ address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
+ address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}
+/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
- hwaddr pa;
- pa = vq->vring.used + offsetof(VRingUsed, idx);
- return virtio_lduw_phys(vq->vdev, pa);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+ hwaddr pa = offsetof(VRingUsed, idx);
+ return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}
+/* Called within rcu_read_lock(). */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
- hwaddr pa;
- pa = vq->vring.used + offsetof(VRingUsed, idx);
- virtio_stw_phys(vq->vdev, pa, val);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+ hwaddr pa = offsetof(VRingUsed, idx);
+ virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
+ address_space_cache_invalidate(&caches->used, pa, sizeof(val));
vq->used_idx = val;
}
+/* Called within rcu_read_lock(). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
VirtIODevice *vdev = vq->vdev;
- hwaddr pa;
- pa = vq->vring.used + offsetof(VRingUsed, flags);
- virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
+ hwaddr pa = offsetof(VRingUsed, flags);
+ uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+ virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
+ address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
+/* Called within rcu_read_lock(). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
VirtIODevice *vdev = vq->vdev;
- hwaddr pa;
- pa = vq->vring.used + offsetof(VRingUsed, flags);
- virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
+ hwaddr pa = offsetof(VRingUsed, flags);
+ uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+
+ virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
+ address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}
+/* Called within rcu_read_lock(). */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
+ VRingMemoryRegionCaches *caches;
hwaddr pa;
if (!vq->notification) {
return;
}
- pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
- virtio_stw_phys(vq->vdev, pa, val);
+
+ caches = vring_get_region_caches(vq);
+ pa = offsetof(VRingUsed, ring[vq->vring.num]);
+ virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
+ address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
vq->notification = enable;
+
+ if (!vq->vring.desc) {
+ return;
+ }
+
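+ /* vring_set_avail_event() and vring_avail_idx() dereference the
+ * RCU-protected region caches, so take the read lock around them. */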
+ rcu_read_lock();
if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
vring_set_avail_event(vq, vring_avail_idx(vq));
} else if (enable) {
/* Expose avail event/used flags before caller checks the avail idx. */
smp_mb();
}
+ rcu_read_unlock();
}
int virtio_queue_ready(VirtQueue *vq)
}
/* Fetch avail_idx from VQ memory only when we really need to know if
- * guest has added some buffers. */
-int virtio_queue_empty(VirtQueue *vq)
+ * the guest has added some buffers.
+ * Called within rcu_read_lock(). */
+static int virtio_queue_empty_rcu(VirtQueue *vq)
{
+ if (unlikely(!vq->vring.avail)) {
+ return 1;
+ }
+
if (vq->shadow_avail_idx != vq->last_avail_idx) {
return 0;
}
return vring_avail_idx(vq) == vq->last_avail_idx;
}
+int virtio_queue_empty(VirtQueue *vq)
+{
+ bool empty;
+
+ if (unlikely(!vq->vring.avail)) {
+ return 1;
+ }
+
+ if (vq->shadow_avail_idx != vq->last_avail_idx) {
+ return 0;
+ }
+
+ rcu_read_lock();
+ empty = vring_avail_idx(vq) == vq->last_avail_idx;
+ rcu_read_unlock();
+ return empty;
+}
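+/* Illustrative sketch (the handler name is hypothetical): a device's output
+ * callback typically drains the queue with virtqueue_pop()/virtqueue_push()
+ * and then notifies the guest:
+ *
+ *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+ *     {
+ *         VirtQueueElement *elem;
+ *
+ *         while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
+ *             unsigned len = 0;   // bytes written into elem->in_sg
+ *             virtqueue_push(vq, elem, len);
+ *             g_free(elem);
+ *         }
+ *         virtio_notify(vdev, vq);
+ *     }
+ */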
+
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
return true;
}
+/* Called within rcu_read_lock(). */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx)
{
return;
}
+ if (unlikely(!vq->vring.used)) {
+ return;
+ }
+
idx = (idx + vq->used_idx) % vq->vring.num;
uelem.id = elem->index;
vring_used_write(vq, &uelem, idx);
}
+/* Called within rcu_read_lock(). */
void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
uint16_t old, new;
return;
}
+ if (unlikely(!vq->vring.used)) {
+ return;
+ }
+
/* Make sure buffer is written before we update index. */
smp_wmb();
trace_virtqueue_flush(vq, count);
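+/* virtqueue_push() takes the RCU read lock itself; callers batching several
+ * elements should call virtqueue_fill()/virtqueue_flush() directly under a
+ * single rcu_read_lock() instead. */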
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
+ rcu_read_lock();
virtqueue_fill(vq, elem, len, 0);
virtqueue_flush(vq, 1);
+ rcu_read_unlock();
}
+/* Called within rcu_read_lock(). */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
uint16_t num_heads = vring_avail_idx(vq) - idx;
return num_heads;
}
+/* Called within rcu_read_lock(). */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
unsigned int *head)
{
int64_t len = 0;
int rc;
+ if (unlikely(!vq->vring.desc)) {
+ if (in_bytes) {
+ *in_bytes = 0;
+ }
+ if (out_bytes) {
+ *out_bytes = 0;
+ }
+ return;
+ }
+
rcu_read_lock();
idx = vq->last_avail_idx;
total_bufs = in_total = out_total = 0;
max = vq->vring.num;
- caches = atomic_rcu_read(&vq->vring.caches);
+ caches = vring_get_region_caches(vq);
if (caches->desc.len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto err;
assert(sz >= sizeof(VirtQueueElement));
elem = g_malloc(out_sg_end);
+ trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
elem->out_num = out_num;
elem->in_num = in_num;
elem->in_addr = (void *)elem + in_addr_ofs;
int64_t len;
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem = NULL;
- unsigned out_num, in_num;
+ unsigned out_num, in_num, elem_entries;
hwaddr addr[VIRTQUEUE_MAX_SIZE];
struct iovec iov[VIRTQUEUE_MAX_SIZE];
VRingDesc desc;
if (unlikely(vdev->broken)) {
return NULL;
}
- if (virtio_queue_empty(vq)) {
- return NULL;
+ rcu_read_lock();
+ if (virtio_queue_empty_rcu(vq)) {
+ goto done;
}
/* Needed after virtio_queue_empty_rcu(), see comment in
* virtqueue_num_heads(). */
smp_rmb();
/* When we start there are neither input nor output buffers. */
- out_num = in_num = 0;
+ out_num = in_num = elem_entries = 0;
max = vq->vring.num;
if (vq->inuse >= vq->vring.num) {
virtio_error(vdev, "Virtqueue size exceeded");
- return NULL;
+ goto done;
}
if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
- return NULL;
+ goto done;
}
if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
i = head;
- rcu_read_lock();
- caches = atomic_rcu_read(&vq->vring.caches);
+ caches = vring_get_region_caches(vq);
if (caches->desc.len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto done;
}
/* If we've got too many, that implies a descriptor loop. */
- if ((in_num + out_num) > max) {
+ if (++elem_entries > max) {
virtio_error(vdev, "Looped descriptor");
goto err_undo_map;
}
/* TODO: teach all callers that this can fail, and return failure instead
* of asserting here.
- * When we do, we might be able to re-enable NDEBUG below.
+ * This is just one thing (there are probably more) that must be
+ * fixed before we can allow NDEBUG compilation.
*/
-#ifdef NDEBUG
-#error building with NDEBUG is not supported
-#endif
assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
}
}
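+/* Detach and free a queue's region caches. Readers still inside
+ * rcu_read_lock() keep using the old caches until call_rcu() reclaims
+ * them after a grace period. */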
+static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
+{
+ VRingMemoryRegionCaches *caches;
+
+ caches = atomic_read(&vq->vring.caches);
+ atomic_rcu_set(&vq->vring.caches, NULL);
+ if (caches) {
+ call_rcu(caches, virtio_free_region_cache, rcu);
+ }
+}
+
void virtio_reset(void *opaque)
{
VirtIODevice *vdev = opaque;
vdev->vq[i].notification = true;
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
vdev->vq[i].inuse = 0;
+ virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
+ if (!vdev->vq[n].vring.num) {
+ return;
+ }
vdev->vq[n].vring.desc = addr;
virtio_queue_update_rings(vdev, n);
}
void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
hwaddr avail, hwaddr used)
{
+ if (!vdev->vq[n].vring.num) {
+ return;
+ }
vdev->vq[n].vring.desc = desc;
vdev->vq[n].vring.avail = avail;
vdev->vq[n].vring.used = used;
*/
assert(k->has_variable_vring_alignment);
- vdev->vq[n].vring.align = align;
- virtio_queue_update_rings(vdev, n);
+ if (align) {
+ vdev->vq[n].vring.align = align;
+ virtio_queue_update_rings(vdev, n);
+ }
}
static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
void virtio_queue_notify(VirtIODevice *vdev, int n)
{
- virtio_queue_notify_vq(&vdev->vq[n]);
+ VirtQueue *vq = &vdev->vq[n];
+
+ if (unlikely(!vq->vring.desc || vdev->broken)) {
+ return;
+ }
+
+ trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
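+ /* Queues with an AIO handler are kicked via the host notifier so the
+ * handler runs in its AioContext; otherwise invoke the handler directly. */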
+ if (vq->handle_aio_output) {
+ event_notifier_set(&vq->host_notifier);
+ } else if (vq->handle_output) {
+ vq->handle_output(vdev, vq);
+ }
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
}
}
+/* Called within rcu_read_lock(). */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
uint16_t old, new;
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
- if (!virtio_should_notify(vdev, vq)) {
+ bool should_notify;
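+
+ /* virtio_should_notify() reads used_event from the avail ring through
+ * the RCU-protected caches. */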
+ rcu_read_lock();
+ should_notify = virtio_should_notify(vdev, vq);
+ rcu_read_unlock();
+
+ if (!should_notify) {
return;
}
event_notifier_set(&vq->guest_notifier);
}
+static void virtio_irq(VirtQueue *vq)
+{
+ virtio_set_isr(vq->vdev, 0x1);
+ virtio_notify_vector(vq->vdev, vq->vector);
+}
+
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
- if (!virtio_should_notify(vdev, vq)) {
+ bool should_notify;
+ rcu_read_lock();
+ should_notify = virtio_should_notify(vdev, vq);
+ rcu_read_unlock();
+
+ if (!should_notify) {
return;
}
trace_virtio_notify(vdev, vq);
- virtio_set_isr(vq->vdev, 0x1);
- virtio_notify_vector(vdev, vq->vector);
+ virtio_irq(vq);
}
void virtio_notify_config(VirtIODevice *vdev)
}
};
-void virtio_save(VirtIODevice *vdev, QEMUFile *f)
+int virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
if (k->has_variable_vring_alignment) {
qemu_put_be32(f, vdev->vq[i].vring.align);
}
- /* XXX virtio-1 devices */
+ /*
+ * Save desc now; the rest of the ring addresses are saved in
+ * subsections for VIRTIO-1 devices.
+ */
qemu_put_be64(f, vdev->vq[i].vring.desc);
qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
if (k->save_queue) {
}
if (vdc->vmsd) {
- vmstate_save_state(f, vdc->vmsd, vdev, NULL);
+ int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
+ if (ret) {
+ return ret;
+ }
}
/* Subsections */
- vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
+ return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
VMStateField *field, QJSON *vmdesc)
{
- virtio_save(VIRTIO_DEVICE(opaque), f);
-
- return 0;
+ return virtio_save(VIRTIO_DEVICE(opaque), f);
}
/* A wrapper for use as a VMState .get function */
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
- if (vdev->vq[i].vring.desc) {
- /* XXX virtio-1 devices */
- virtio_queue_update_rings(vdev, i);
- } else if (vdev->vq[i].last_avail_idx) {
+ if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
error_report("VQ %d address 0x0 "
"inconsistent with Host index 0x%x",
i, vdev->vq[i].last_avail_idx);
- return -1;
+ return -1;
}
if (k->load_queue) {
ret = k->load_queue(qbus->parent, i, f);
}
}
+ rcu_read_lock();
for (i = 0; i < num; i++) {
if (vdev->vq[i].vring.desc) {
uint16_t nheads;
+
+ /*
+ * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
+ * only the region cache needs to be set up. Legacy devices need
+ * to calculate used and avail ring addresses based on the desc
+ * address.
+ */
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ virtio_init_region_cache(vdev, i);
+ } else {
+ virtio_queue_update_rings(vdev, i);
+ }
+
nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
/* Check it isn't doing strange things with descriptor numbers. */
if (nheads > vdev->vq[i].vring.num) {
}
}
}
+ rcu_read_unlock();
return 0;
}
vdev->vq[n].shadow_avail_idx = idx;
}
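+/* Rewind last_avail_idx (and its shadow) to the used index the device last
+ * published, so requests that were in flight are fetched again. */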
+void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
+{
+ rcu_read_lock();
+ if (vdev->vq[n].vring.desc) {
+ vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
+ vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
+ }
+ rcu_read_unlock();
+}
+
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
- vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
+ rcu_read_lock();
+ if (vdev->vq[n].vring.desc) {
+ vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
+ }
+ rcu_read_unlock();
}
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
if (event_notifier_test_and_clear(n)) {
- virtio_notify_vector(vq->vdev, vq->vector);
+ virtio_irq(vq);
}
}
VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
bool progress;
- if (virtio_queue_empty(vq)) {
+ if (!vq->vring.desc || virtio_queue_empty(vq)) {
return false;
}
error_vreport(fmt, ap);
va_end(ap);
- vdev->broken = true;
-
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
- virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
+ vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
virtio_notify_config(vdev);
}
+
+ vdev->broken = true;
}
static void virtio_memory_listener_commit(MemoryListener *listener)
virtio_bus_device_plugged(vdev, &err);
if (err != NULL) {
error_propagate(errp, err);
+ vdc->unrealize(dev, NULL);
return;
}
}
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
- VRingMemoryRegionCaches *caches;
if (vdev->vq[i].vring.num == 0) {
break;
}
- caches = atomic_read(&vdev->vq[i].vring.caches);
- atomic_set(&vdev->vq[i].vring.caches, NULL);
- virtio_free_region_cache(caches);
+ virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}
g_free(vdev->vq);
}
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
- int n, r, err;
+ int i, n, r, err;
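+
+ /* Batch all host-notifier updates into a single memory transaction so
+ * the memory topology is rebuilt once rather than once per queue. */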
+ memory_region_transaction_begin();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
}
event_notifier_set(&vq->host_notifier);
}
+ memory_region_transaction_commit();
return 0;
assign_error:
+ i = n; /* save n for a second iteration after transaction is committed. */
while (--n >= 0) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
+ memory_region_transaction_commit();
+
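+ /* Clean up the notifiers only after the commit; until then the
+ * ioeventfd memory regions may still reference them. */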
+ while (--i >= 0) {
+ if (!virtio_queue_get_num(vdev, i)) {
+ continue;
+ }
+ virtio_bus_cleanup_host_notifier(qbus, i);
+ }
return err;
}
VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
int n, r;
+ memory_region_transaction_begin();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
VirtQueue *vq = &vdev->vq[n];
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
+ memory_region_transaction_commit();
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ virtio_bus_cleanup_host_notifier(qbus, n);
+ }
}
void virtio_device_stop_ioeventfd(VirtIODevice *vdev)