#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
-#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
VRingMemoryRegionCaches *new;
hwaddr addr, size;
int event_size;
+ int64_t len;
event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
addr = vq->vring.desc;
if (!addr) {
return;
}
new = g_new0(VRingMemoryRegionCaches, 1);
size = virtio_queue_get_desc_size(vdev, n);
- address_space_cache_init(&new->desc, vdev->dma_as,
- addr, size, false);
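+ /* address_space_cache_init() returns how many bytes could actually be
+  * cached (negative on error); anything short of the requested size means
+  * the ring cannot be mapped and the device is marked broken below. */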
+ len = address_space_cache_init(&new->desc, vdev->dma_as,
+ addr, size, false);
+ if (len < size) {
+ virtio_error(vdev, "Cannot map desc");
+ goto err_desc;
+ }
size = virtio_queue_get_used_size(vdev, n) + event_size;
- address_space_cache_init(&new->used, vdev->dma_as,
- vq->vring.used, size, true);
+ len = address_space_cache_init(&new->used, vdev->dma_as,
+ vq->vring.used, size, true);
+ if (len < size) {
+ virtio_error(vdev, "Cannot map used");
+ goto err_used;
+ }
size = virtio_queue_get_avail_size(vdev, n) + event_size;
- address_space_cache_init(&new->avail, vdev->dma_as,
- vq->vring.avail, size, false);
+ len = address_space_cache_init(&new->avail, vdev->dma_as,
+ vq->vring.avail, size, false);
+ if (len < size) {
+ virtio_error(vdev, "Cannot map avail");
+ goto err_avail;
+ }
atomic_rcu_set(&vq->vring.caches, new);
if (old) {
call_rcu(old, virtio_free_region_cache, rcu);
}
+ return;
+
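+ /* Error unwind: destroy only the caches that were successfully
+  * initialized before the failure, in reverse order. */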
+err_avail:
+ address_space_cache_destroy(&new->used);
+err_used:
+ address_space_cache_destroy(&new->desc);
+err_desc:
+ g_free(new);
}
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
VRing *vring = &vdev->vq[n].vring;
- if (!vring->desc) {
+ if (!vring->num || !vring->desc || !vring->align) {
/* not yet setup -> nothing to do */
return;
}
virtio_tswap16s(vdev, &desc->next);
}
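+/* Fetch the region caches for a queue.  Callers run under rcu_read_lock()
+ * and the caches must already have been set up by virtio_init_region_cache(). */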
+static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
+{
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ assert(caches != NULL);
+ return caches;
+}
/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingAvail, flags);
return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingAvail, idx);
vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
return vq->shadow_avail_idx;
/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingAvail, ring[i]);
return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
int i)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingUsed, ring[i]);
virtio_tswap32s(vq->vdev, &uelem->id);
virtio_tswap32s(vq->vdev, &uelem->len);
/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingUsed, idx);
return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}
/* Called within rcu_read_lock(). */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingUsed, idx);
virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
address_space_cache_invalidate(&caches->used, pa, sizeof(val));
/* Called within rcu_read_lock(). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
VirtIODevice *vdev = vq->vdev;
hwaddr pa = offsetof(VRingUsed, flags);
uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
/* Called within rcu_read_lock(). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
VirtIODevice *vdev = vq->vdev;
hwaddr pa = offsetof(VRingUsed, flags);
uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
return;
}
- caches = atomic_rcu_read(&vq->vring.caches);
+ caches = vring_get_region_caches(vq);
pa = offsetof(VRingUsed, ring[vq->vring.num]);
virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
address_space_cache_invalidate(&caches->used, pa, sizeof(val));
* Called within rcu_read_lock(). */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
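+ /* A queue whose avail ring has not been set up (or was reset) has
+  * nothing for us to process; report it as empty. */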
+ if (unlikely(!vq->vring.avail)) {
+ return 1;
+ }
+
if (vq->shadow_avail_idx != vq->last_avail_idx) {
return 0;
}
{
bool empty;
+ if (unlikely(!vq->vring.avail)) {
+ return 1;
+ }
+
if (vq->shadow_avail_idx != vq->last_avail_idx) {
return 0;
}
return;
}
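+ /* Without a used ring there is nowhere to write the element back to,
+  * e.g. after the device has been reset. */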
+ if (unlikely(!vq->vring.used)) {
+ return;
+ }
+
idx = (idx + vq->used_idx) % vq->vring.num;
uelem.id = elem->index;
return;
}
+ if (unlikely(!vq->vring.used)) {
+ return;
+ }
+
/* Make sure buffer is written before we update index. */
smp_wmb();
trace_virtqueue_flush(vq, count);
int64_t len = 0;
int rc;
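+ /* No descriptor ring means nothing to count: report zero bytes in
+  * both directions and return early. */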
+ if (unlikely(!vq->vring.desc)) {
+ if (in_bytes) {
+ *in_bytes = 0;
+ }
+ if (out_bytes) {
+ *out_bytes = 0;
+ }
+ return;
+ }
+
rcu_read_lock();
idx = vq->last_avail_idx;
total_bufs = in_total = out_total = 0;
max = vq->vring.num;
- caches = atomic_rcu_read(&vq->vring.caches);
+ caches = vring_get_region_caches(vq);
if (caches->desc.len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto err;
assert(sz >= sizeof(VirtQueueElement));
elem = g_malloc(out_sg_end);
+ trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
elem->out_num = out_num;
elem->in_num = in_num;
elem->in_addr = (void *)elem + in_addr_ofs;
int64_t len;
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem = NULL;
- unsigned out_num, in_num;
+ unsigned out_num, in_num, elem_entries;
hwaddr addr[VIRTQUEUE_MAX_SIZE];
struct iovec iov[VIRTQUEUE_MAX_SIZE];
VRingDesc desc;
smp_rmb();
/* When we start there are none of either input nor output. */
- out_num = in_num = 0;
+ out_num = in_num = elem_entries = 0;
max = vq->vring.num;
i = head;
- caches = atomic_rcu_read(&vq->vring.caches);
+ caches = vring_get_region_caches(vq);
if (caches->desc.len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto done;
}
/* If we've got too many, that implies a descriptor loop. */
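+ /* Count descriptors rather than iovec entries: mapping a single
+  * descriptor can produce several iovecs when the guest buffer is not
+  * contiguous in host memory, so in_num + out_num may legitimately
+  * exceed the ring size. */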
- if ((in_num + out_num) > max) {
+ if (++elem_entries > max) {
virtio_error(vdev, "Looped descriptor");
goto err_undo_map;
}
/* TODO: teach all callers that this can fail, and return failure instead
* of asserting here.
- * When we do, we might be able to re-enable NDEBUG below.
+ * This is just one thing (there are probably more) that must be
+ * fixed before we can allow NDEBUG compilation.
*/
-#ifdef NDEBUG
-#error building with NDEBUG is not supported
-#endif
assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
}
}
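+/* Drop a queue's region caches: detach them from the queue and free them
+ * once concurrent RCU readers are done with the old mapping. */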
+static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
+{
+ VRingMemoryRegionCaches *caches;
+
+ caches = atomic_read(&vq->vring.caches);
+ atomic_rcu_set(&vq->vring.caches, NULL);
+ if (caches) {
+ call_rcu(caches, virtio_free_region_cache, rcu);
+ }
+}
+
void virtio_reset(void *opaque)
{
VirtIODevice *vdev = opaque;
vdev->vq[i].notification = true;
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
vdev->vq[i].inuse = 0;
+ virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}
}
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
+ if (!vdev->vq[n].vring.num) {
+ return;
+ }
vdev->vq[n].vring.desc = addr;
virtio_queue_update_rings(vdev, n);
}
void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
hwaddr avail, hwaddr used)
{
+ if (!vdev->vq[n].vring.num) {
+ return;
+ }
vdev->vq[n].vring.desc = desc;
vdev->vq[n].vring.avail = avail;
vdev->vq[n].vring.used = used;
*/
assert(k->has_variable_vring_alignment);
- vdev->vq[n].vring.align = align;
- virtio_queue_update_rings(vdev, n);
+ if (align) {
+ vdev->vq[n].vring.align = align;
+ virtio_queue_update_rings(vdev, n);
+ }
}
static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
void virtio_queue_notify(VirtIODevice *vdev, int n)
{
- virtio_queue_notify_vq(&vdev->vq[n]);
+ VirtQueue *vq = &vdev->vq[n];
+
+ if (unlikely(!vq->vring.desc || vdev->broken)) {
+ return;
+ }
+
+ trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
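+ /* Queues with an AIO handler are kicked through the host notifier so
+  * the handler runs in its AioContext; otherwise call the handler here. */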
+ if (vq->handle_aio_output) {
+ event_notifier_set(&vq->host_notifier);
+ } else if (vq->handle_output) {
+ vq->handle_output(vdev, vq);
+ }
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
}
};
-void virtio_save(VirtIODevice *vdev, QEMUFile *f)
+int virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
}
if (vdc->vmsd) {
- vmstate_save_state(f, vdc->vmsd, vdev, NULL);
+ int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
+ if (ret) {
+ return ret;
+ }
}
/* Subsections */
- vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
+ return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
VMStateField *field, QJSON *vmdesc)
{
- virtio_save(VIRTIO_DEVICE(opaque), f);
-
- return 0;
+ return virtio_save(VIRTIO_DEVICE(opaque), f);
}
/* A wrapper for use as a VMState .get function */
vdev->vq[n].shadow_avail_idx = idx;
}
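+/* Resynchronize last_avail_idx (and its shadow copy) with the used index
+ * that the device has written to guest memory. */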
+void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
+{
+ rcu_read_lock();
+ if (vdev->vq[n].vring.desc) {
+ vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
+ vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
+ }
+ rcu_read_unlock();
+}
+
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
rcu_read_lock();
error_vreport(fmt, ap);
va_end(ap);
- vdev->broken = true;
-
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
- virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
+ vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
virtio_notify_config(vdev);
}
+
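+ /* Mark the device broken only after notifying the guest: the notify
+  * path is suppressed once the broken flag is set. */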
+ vdev->broken = true;
}
static void virtio_memory_listener_commit(MemoryListener *listener)
virtio_bus_device_plugged(vdev, &err);
if (err != NULL) {
error_propagate(errp, err);
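+ /* vdc->realize() already succeeded above; undo it so a failed bus plug
+  * does not leave a half-initialized device behind. */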
+ vdc->unrealize(dev, NULL);
return;
}
}
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
- VRingMemoryRegionCaches *caches;
if (vdev->vq[i].vring.num == 0) {
break;
}
- caches = atomic_read(&vdev->vq[i].vring.caches);
- atomic_set(&vdev->vq[i].vring.caches, NULL);
- virtio_free_region_cache(caches);
+ virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}
g_free(vdev->vq);
}
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
- int n, r, err;
+ int i, n, r, err;
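+ /* Batch all ioeventfd MemoryRegion updates into a single transaction so
+  * the memory map is rebuilt once rather than once per queue. */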
+ memory_region_transaction_begin();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
}
event_notifier_set(&vq->host_notifier);
}
+ memory_region_transaction_commit();
return 0;
assign_error:
+ i = n; /* save n for a second iteration after transaction is committed. */
while (--n >= 0) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
+ memory_region_transaction_commit();
+
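+ /* The host notifiers can only be cleaned up after the transaction above
+  * has actually removed them from the memory map. */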
+ while (--i >= 0) {
+ if (!virtio_queue_get_num(vdev, i)) {
+ continue;
+ }
+ virtio_bus_cleanup_host_notifier(qbus, i);
+ }
return err;
}
VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
int n, r;
+ memory_region_transaction_begin();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
VirtQueue *vq = &vdev->vq[n];
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
+ memory_region_transaction_commit();
+
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ virtio_bus_cleanup_host_notifier(qbus, n);
+ }
}
void virtio_device_stop_ioeventfd(VirtIODevice *vdev)