 * Copyright IBM, Corp. 2007
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
#define VIRTIO_PCI_VRING_ALIGN 4096
typedef struct VRingDesc
typedef struct VRingAvail
typedef struct VRingUsedElem
typedef struct VRingUsed
    VRingUsedElem ring[0];
typedef struct VRingMemoryRegionCaches {
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;
    unsigned int num_default;
    VRingMemoryRegionCaches *caches;
    /* Next head to pop */
    uint16_t last_avail_idx;
    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    /* Last used index value we have signalled on */
    uint16_t signalled_used;
    /* Whether the signalled_used value above is valid */
    bool signalled_used_valid;
    /* Notification enabled? */
    uint16_t queue_index;
    VirtIOHandleOutput handle_output;
    VirtIOHandleAIOOutput handle_aio_output;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
static void virtio_init_region_cache(VirtIODevice *vdev, int n)
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new;
    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
    addr = vq->vring.desc;
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    len = address_space_cache_init(&new->desc, vdev->dma_as,
        virtio_error(vdev, "Cannot map desc");
    size = virtio_queue_get_used_size(vdev, n) + event_size;
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
        virtio_error(vdev, "Cannot map used");
    size = virtio_queue_get_avail_size(vdev, n) + event_size;
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
        virtio_error(vdev, "Cannot map avail");
    atomic_rcu_set(&vq->vring.caches, new);
        call_rcu(old, virtio_free_region_cache, rcu);
    address_space_cache_destroy(&new->used);
    address_space_cache_destroy(&new->desc);
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
    VRing *vring = &vdev->vq[n].vring;
    /* not yet setup -> nothing to do */
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
    virtio_init_region_cache(vdev, n);
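/*
 * Illustrative sketch (not from the original file): the legacy split-ring
 * layout that the arithmetic above implements. For a hypothetical queue of
 * 256 descriptors with the default 4096-byte alignment, the three parts of
 * the ring follow each other from the descriptor table base:
 */
#if 0 /* example only */
static void example_ring_layout(void)
{
    const unsigned num = 256;                       /* hypothetical size */
    hwaddr desc  = 0;                               /* table base */
    hwaddr avail = desc + num * sizeof(VRingDesc);  /* 256 * 16 = 4096 */
    /* avail: flags + idx + ring[num] = 2 + 2 + 2 * 256 = 516 bytes */
    hwaddr used  = vring_align(avail + offsetof(VRingAvail, ring[num]),
                               VIRTIO_PCI_VRING_ALIGN); /* rounds up to 8192 */
}
#endif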
/* Called within rcu_read_lock(). */
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            MemoryRegionCache *cache, int i)
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
    assert(caches != NULL);
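/*
 * Illustrative sketch (not from the original file): the caches installed by
 * virtio_init_region_cache() are published with atomic_rcu_set() and freed
 * via call_rcu(), so a reader must hold the RCU read lock for as long as it
 * uses the returned pointer. A hypothetical reader looks like this:
 */
#if 0 /* example only */
static uint16_t example_read_used_idx(VirtQueue *vq)
{
    uint16_t idx;

    rcu_read_lock();
    /* safe: the caches cannot be freed while we are in the read section */
    idx = vring_used_idx(vq);
    rcu_read_unlock();
    return idx;
}
#endif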
/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
/* Called within rcu_read_lock(). */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
    return vring_avail_ring(vq, vq->vring.num);
/* Called within rcu_read_lock(). */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);
    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
/* Called within rcu_read_lock(). */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
/* Called within rcu_read_lock(). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
/* Called within rcu_read_lock(). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
/* Called within rcu_read_lock(). */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
    VRingMemoryRegionCaches *caches;
    if (!vq->notification) {
    caches = vring_get_region_caches(vq);
    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
void virtio_queue_set_notification(VirtQueue *vq, int enable)
    vq->notification = enable;
    if (!vq->vring.desc) {
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
        /* Expose avail event/used flags before caller checks the avail idx. */
int virtio_queue_ready(VirtQueue *vq)
    return vq->vring.avail != 0;
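/*
 * Illustrative sketch (not from the original file): the usual way a device
 * consumes virtio_queue_set_notification() is to suppress guest kicks while
 * it drains the ring, then re-enable them and re-check for a race. The
 * handler name below is hypothetical.
 */
#if 0 /* example only */
static void example_drain_queue(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement *elem;

    do {
        virtio_queue_set_notification(vq, 0);  /* batch: no kicks, please */
        while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
            /* ... process elem ... */
            virtqueue_push(vq, elem, 0);
            g_free(elem);
        }
        virtio_queue_set_notification(vq, 1);  /* may race with a new buffer */
    } while (!virtio_queue_empty(vq));         /* ... so check once more */
    virtio_notify(vdev, vq);
}
#endif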
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock(). */
static int virtio_queue_empty_rcu(VirtQueue *vq)
    if (unlikely(!vq->vring.avail)) {
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
    return vring_avail_idx(vq) == vq->last_avail_idx;
int virtio_queue_empty(VirtQueue *vq)
    if (unlikely(!vq->vring.avail)) {
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
    AddressSpace *dma_as = vq->vdev->dma_as;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);
    for (i = 0; i < elem->out_num; i++)
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 * Detach the element from the virtqueue. This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
    virtqueue_unmap_sg(vq, elem, len);
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 * Pretend the most recent element wasn't popped from the virtqueue. The next
 * call to virtqueue_pop() will refetch the element.
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
    vq->last_avail_idx--;
    virtqueue_detach_element(vq, elem, len);
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 * Pretend that elements weren't popped from the virtqueue. The next
 * virtqueue_pop() will refetch the oldest element.
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 * Returns: true on success, false if @num is greater than the number of in use
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
    if (num > vq->inuse) {
    vq->last_avail_idx -= num;
/* Called within rcu_read_lock(). */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
    trace_virtqueue_fill(vq, elem, len, idx);
    virtqueue_unmap_sg(vq, elem, len);
    if (unlikely(vq->vdev->broken)) {
    if (unlikely(!vq->vring.used)) {
    idx = (idx + vq->used_idx) % vq->vring.num;
    uelem.id = elem->index;
    vring_used_write(vq, &uelem, idx);
/* Called within rcu_read_lock(). */
void virtqueue_flush(VirtQueue *vq, unsigned int count)
    if (unlikely(vq->vdev->broken)) {
    if (unlikely(!vq->vring.used)) {
    /* Make sure buffer is written before we update index. */
    trace_virtqueue_flush(vq, count);
    vring_used_idx_set(vq, new);
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
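/*
 * Illustrative sketch (not from the original file): virtqueue_push() is the
 * single-element shorthand above; a device completing several requests at
 * once can instead fill consecutive used-ring slots and publish them with a
 * single index update, and thus at most one guest notification:
 */
#if 0 /* example only */
static void example_complete_batch(VirtIODevice *vdev, VirtQueue *vq,
                                   VirtQueueElement **elems, unsigned n)
{
    unsigned i;

    rcu_read_lock();
    for (i = 0; i < n; i++) {
        virtqueue_fill(vq, elems[i], 0, i);   /* slot used_idx + i */
    }
    virtqueue_flush(vq, n);                   /* one idx update for all n */
    rcu_read_unlock();
    virtio_notify(vdev, vq);
}
#endif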
/* Called within rcu_read_lock(). */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
    uint16_t num_heads = vring_avail_idx(vq) - idx;
    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
/* Called within rcu_read_lock(). */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);
    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    MemoryRegionCache *desc_cache, unsigned int max,
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    /* Check they're not leading us off end of descriptors. */
    /* Make sure compiler knows to grab that: we don't want it changing! */
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    vring_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    if (unlikely(!vq->vring.desc)) {
    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;
    caches = vring_get_region_caches(vq);
    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(vq, idx++, &i)) {
        vring_desc_read(vdev, &desc, desc_cache, i);
        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
            max = desc.len / sizeof(VRingDesc);
            vring_desc_read(vdev, &desc, desc_cache, i);
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
                out_total += desc.len;
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
            rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);
        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
        total_bufs = num_bufs;
    address_space_cache_destroy(&indirect_desc_cache);
    *in_bytes = in_total;
    *out_bytes = out_total;
    in_total = out_total = 0;
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
    unsigned int in_total, out_total;
    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
        iov[num_sg].iov_len = len;
/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;
        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int *num_sg,
    for (i = 0; i < *num_sg; i++) {
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, 1);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, 0);
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
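/*
 * Illustrative sketch (not from the original file): because the addr/sg
 * arrays are carved out of a single allocation after the first @sz bytes,
 * a device can embed a VirtQueueElement as the first member of its own
 * request struct and pass sizeof(that struct) as @sz. MyRequest below is
 * hypothetical; virtio-blk and others use this pattern.
 */
#if 0 /* example only */
typedef struct MyRequest {
    VirtQueueElement elem;   /* must be the first field */
    uint8_t status;          /* device-private state rides along */
} MyRequest;

MyRequest *req = virtqueue_pop(vq, sizeof(MyRequest));
#endif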
void *virtqueue_pop(VirtQueue *vq, size_t sz)
    unsigned int i, head, max;
    VRingMemoryRegionCaches *caches;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    MemoryRegionCache *desc_cache;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem = NULL;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    if (unlikely(vdev->broken)) {
    if (virtio_queue_empty_rcu(vq)) {
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;
    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    caches = vring_get_region_caches(vq);
    if (caches->desc.len < max * sizeof(VRingDesc)) {
        virtio_error(vdev, "Cannot map descriptor ring");
    desc_cache = &caches->desc;
    vring_desc_read(vdev, &desc, desc_cache, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
        /* loop over the indirect descriptor table */
        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                       desc.addr, desc.len, false);
        desc_cache = &indirect_desc_cache;
        if (len < desc.len) {
            virtio_error(vdev, "Cannot map indirect buffer");
        max = desc.len / sizeof(VRingDesc);
        vring_desc_read(vdev, &desc, desc_cache, i);
    /* Collect all the descriptors */
        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
                virtio_error(vdev, "Incorrect order for descriptors");
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            virtio_error(vdev, "Looped descriptor");
        rc = virtqueue_read_next_desc(vdev, &desc, desc_cache, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);
    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    address_space_cache_destroy(&indirect_desc_cache);
    virtqueue_undo_map_desc(out_num, in_num, iov);
/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done. Useful when buffers cannot be
 * processed but must be returned to the guest.
unsigned int virtqueue_drop_all(VirtQueue *vq)
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
    if (unlikely(vdev->broken)) {
    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similarly to virtqueue_pop() but does not map buffers
         * and does not allocate any memory */
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
        vq->last_avail_idx++;
            vring_set_avail_event(vq, vq->last_avail_idx);
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
typedef struct VirtQueueElementOld {
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
#error building with NDEBUG is not supported
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;
    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    virtqueue_map(vdev, elem);
void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
    VirtQueueElementOld data;
    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;
    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout.  */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
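/*
 * Illustrative sketch (not from the original file): a device that keeps
 * in-flight requests across migration pairs these two helpers in its
 * save/load callbacks. MyRequest is the hypothetical struct from the
 * virtqueue_alloc_element() example above.
 */
#if 0 /* example only */
/* save side: the element is marshalled into the old, fixed-size layout */
qemu_put_virtqueue_element(f, &req->elem);

/* load side: reallocates the element and remaps its iovecs */
MyRequest *req = qemu_get_virtqueue_element(vdev, f, sizeof(MyRequest));
#endif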
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    if (unlikely(vdev->broken)) {
        k->notify(qbus->parent, vector);
void virtio_update_irq(VirtIODevice *vdev)
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
static int virtio_validate_features(VirtIODevice *vdev)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
    if (k->validate_features) {
        return k->validate_features(vdev);
int virtio_set_status(VirtIODevice *vdev, uint8_t val)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);
    if (k->set_status) {
        k->set_status(vdev, val);
bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
static enum virtio_device_endian virtio_current_cpu_endian(void)
    CPUClass *cc = CPU_GET_CLASS(current_cpu);
    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
    VRingMemoryRegionCaches *caches;
    caches = atomic_read(&vq->vring.caches);
    atomic_rcu_set(&vq->vring.caches, NULL);
        call_rcu(caches, virtio_free_region_cache, rcu);
void virtio_reset(void *opaque)
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    virtio_set_status(vdev, 0);
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
        vdev->device_endian = virtio_default_endian();
    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    atomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    k->get_config(vdev, vdev->config);
    val = ldub_p(vdev->config + addr);
uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    k->get_config(vdev, vdev->config);
    val = lduw_p(vdev->config + addr);
uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    k->get_config(vdev, vdev->config);
    val = ldl_p(vdev->config + addr);
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    if (addr + sizeof(val) > vdev->config_len) {
    stb_p(vdev->config + addr, val);
    if (k->set_config) {
        k->set_config(vdev, vdev->config);
void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;
    if (addr + sizeof(val) > vdev->config_len) {
    stw_p(vdev->config + addr, val);
    if (k->set_config) {
        k->set_config(vdev, vdev->config);
void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;
    if (addr + sizeof(val) > vdev->config_len) {
    stl_p(vdev->config + addr, val);
    if (k->set_config) {
        k->set_config(vdev, vdev->config);
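/*
 * Illustrative sketch (not from the original file): the k->get_config() and
 * k->set_config() hooks used above are supplied by the concrete device
 * class. The names below are hypothetical; virtio_stl_p() is the usual
 * helper for storing a config field with the device's negotiated
 * endianness.
 */
#if 0 /* example only */
static void my_get_config(VirtIODevice *vdev, uint8_t *config)
{
    struct my_config {
        uint32_t some_field;
    } cfg;

    virtio_stl_p(vdev, &cfg.some_field, 42);   /* hypothetical value */
    memcpy(config, &cfg, sizeof(cfg));
}
#endif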
uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    k->get_config(vdev, vdev->config);
    val = ldub_p(vdev->config + addr);
uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    k->get_config(vdev, vdev->config);
    val = lduw_le_p(vdev->config + addr);
uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    k->get_config(vdev, vdev->config);
    val = ldl_le_p(vdev->config + addr);
void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    if (addr + sizeof(val) > vdev->config_len) {
    stb_p(vdev->config + addr, val);
    if (k->set_config) {
        k->set_config(vdev, vdev->config);
void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;
    if (addr + sizeof(val) > vdev->config_len) {
    stw_le_p(vdev->config + addr, val);
    if (k->set_config) {
        k->set_config(vdev, vdev->config);
void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;
    if (addr + sizeof(val) > vdev->config_len) {
    stl_le_p(vdev->config + addr, val);
    if (k->set_config) {
        k->set_config(vdev, vdev->config);
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
    return vdev->vq[n].vring.desc;
void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
    virtio_init_region_cache(vdev, n);
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
    vdev->vq[n].vring.num = num;
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
    return QLIST_FIRST(&vdev->vector_queues[vector]);
VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
    return QLIST_NEXT(vq, node);
int virtio_queue_get_num(VirtIODevice *vdev, int n)
    return vdev->vq[n].vring.num;
int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
    return vdev->vq[n].vring.num_default;
int virtio_get_num_queues(VirtIODevice *vdev)
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
    assert(k->has_variable_vring_alignment);
    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        return vq->handle_aio_output(vdev, vq);
static void virtio_queue_notify_vq(VirtQueue *vq)
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;
        if (unlikely(vdev->broken)) {
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
void virtio_queue_notify(VirtIODevice *vdev, int n)
    VirtQueue *vq = &vdev->vq[n];
    if (unlikely(!vq->vring.desc || vdev->broken)) {
    trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
    if (vq->handle_aio_output) {
        event_notifier_set(&vq->host_notifier);
    } else if (vq->handle_output) {
        vq->handle_output(vdev, vq);
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
    VirtQueue *vq = &vdev->vq[n];
    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;
    return &vdev->vq[i];
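/*
 * Illustrative sketch (not from the original file): the handle_output
 * callback registered here is invoked when the guest kicks the queue.
 * A minimal handler, with a hypothetical name, pops every available
 * element, completes it, and notifies the guest:
 */
#if 0 /* example only */
static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtQueueElement *elem;

    while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
        /* ... consume elem->out_sg, fill elem->in_sg ... */
        virtqueue_push(vq, elem, 0 /* bytes written to in_sg */);
        g_free(elem);
    }
    virtio_notify(vdev, vq);
}
#endif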
void virtio_del_queue(VirtIODevice *vdev, int n)
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
static void virtio_set_isr(VirtIODevice *vdev, int value)
    uint8_t old = atomic_read(&vdev->isr);
    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
    if ((old & value) != value) {
        atomic_or(&vdev->isr, value);
/* Called within rcu_read_lock(). */
static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
    /* We need to expose used array entries before checking used event. */
    /* Always notify when the queue is empty, if that feature is acknowledged */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
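/*
 * Illustrative note (not from the original file): vring_need_event() comes
 * from the standard virtio ring headers and decides, with wrap-safe 16-bit
 * arithmetic, whether the used index stepped over the guest's used_event
 * threshold since we last signalled:
 */
#if 0 /* example only */
static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx,
                                   uint16_t old_idx)
{
    /* true iff event_idx lies in the half-open window (old_idx, new_idx] */
    return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old_idx);
}
#endif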
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
    should_notify = virtio_should_notify(vdev, vq);
    if (!should_notify) {
    trace_virtio_notify_irqfd(vdev, vq);
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown with
     * The next driver release, from 2016, fixed this problem, so working
     * around it is not a must, but it is easy, so let's do it here.
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
static void virtio_irq(VirtQueue *vq)
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vq->vdev, vq->vector);
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
    should_notify = virtio_should_notify(vdev, vq);
    if (!should_notify) {
    trace_virtio_notify(vdev, vq);
void virtio_notify_config(VirtIODevice *vdev)
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
    virtio_set_isr(vdev, 0x3);
    virtio_notify_vector(vdev, vdev->config_vector);
static bool virtio_device_endian_needed(void *opaque)
    VirtIODevice *vdev = opaque;
    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
static bool virtio_64bit_features_needed(void *opaque)
    VirtIODevice *vdev = opaque;
    return (vdev->host_features >> 32) != 0;
static bool virtio_virtqueue_needed(void *opaque)
    VirtIODevice *vdev = opaque;
    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
static bool virtio_ringsize_needed(void *opaque)
    VirtIODevice *vdev = opaque;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
static bool virtio_extra_state_needed(void *opaque)
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
static bool virtio_broken_needed(void *opaque)
    VirtIODevice *vdev = opaque;
    return vdev->broken;
static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           VMStateField *field)
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    if (!k->load_extra_state) {
        return k->load_extra_state(qbus->parent, f);
static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           VMStateField *field, QJSON *vmdesc)
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    k->save_extra_state(qbus->parent, f);
static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
            .name = "extra_state",
            .field_exists = NULL,
            .info = &vmstate_info_extra_state,
            .flags = VMS_SINGLE,
        VMSTATE_END_OF_LIST()
static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
static const VMStateDescription vmstate_virtio = {
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    if (k->save_config) {
        k->save_config(qbus->parent, f);
    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
    qemu_put_be32(f, i);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
         * Save desc now, the rest of the ring addresses are saved in
         * subsections for VIRTIO-1 devices.
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
    if (vdc->save != NULL) {
        vmstate_save_state(f, vdc->vmsd, vdev, NULL);
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
                             VMStateField *field, QJSON *vmdesc)
    virtio_save(VIRTIO_DEVICE(opaque), f);
/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
                             VMStateField *field)
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
    return virtio_load(vdev, f, dc->vmsd->version_id);
const VMStateInfo virtio_vmstate_info = {
    .get = virtio_device_get,
    .put = virtio_device_put,
static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;
    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    vdev->guest_features = val;
    return bad ? -1 : 0;
int virtio_set_features(VirtIODevice *vdev, uint64_t val)
     * The driver must not attempt to set features after feature negotiation
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
    return virtio_set_features_nocheck(vdev, val);
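/*
 * Illustrative sketch (not from the original file): k->set_features above
 * pairs with the device class's get_features hook, which advertises what
 * the backend supports. The name is hypothetical:
 */
#if 0 /* example only */
static uint64_t my_get_features(VirtIODevice *vdev, uint64_t features,
                                Error **errp)
{
    /* offer event-index interrupt suppression on top of transport defaults */
    virtio_add_feature(&features, VIRTIO_RING_F_EVENT_IDX);
    return features;
}
#endif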
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
    qemu_get_be32s(f, &features);
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
    vdev->guest_features = features;
    config_len = qemu_get_be32(f);
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
    while (config_len > vdev->config_len) {
    num = qemu_get_be32(f);
    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    if (virtio_64bit_features_needed(vdev)) {
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
             * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
             * only the region cache needs to be set up.  Legacy devices need
             * to calculate used and avail ring addresses based on the desc
            if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
                virtio_init_region_cache(vdev, i);
                virtio_queue_update_rings(vdev, i);
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
void virtio_cleanup(VirtIODevice *vdev)
    qemu_del_vm_change_state_handler(vdev->vmstate);
static void virtio_vmstate_change(void *opaque, int running, RunState state)
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;
        virtio_set_status(vdev, vdev->status);
    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
        virtio_set_status(vdev, vdev->status);
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
    DeviceState *vdev = data;
    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    vdev->device_id = device_id;
    atomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
        vdev->config = NULL;
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
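/*
 * Illustrative sketch (not from the original file): a concrete device's
 * realize callback ties virtio_init() and virtio_add_queue() together.
 * All names and sizes below are hypothetical.
 */
#if 0 /* example only */
static void my_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);

    virtio_init(vdev, "my-device", MY_DEVICE_ID, sizeof(struct my_config));
    virtio_add_queue(vdev, 128, my_handle_output);  /* one 128-entry queue */
}
#endif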
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
    return vdev->vq[n].vring.desc;
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
    return vdev->vq[n].vring.avail;
hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
    return vdev->vq[n].vring.used;
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
    return vdev->vq[n].last_avail_idx;
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
    if (vdev->vq[n].vring.desc) {
        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
    vdev->vq[n].signalled_used_valid = false;
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
    return vdev->vq + n;
uint16_t virtio_get_queue_index(VirtQueue *vq)
    return vq->queue_index;
static void virtio_queue_guest_notifier_read(EventNotifier *n)
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
        event_notifier_set_handler(&vq->guest_notifier, NULL);
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
    return &vq->guest_notifier;
static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    virtio_queue_set_notification(vq, 0);
static bool virtio_queue_host_notifier_aio_poll(void *opaque)
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (!vq->vring.desc || virtio_queue_empty(vq)) {
    progress = virtio_queue_notify_aio_vq(vq);
    /* In case the handler function re-enabled notifications */
    virtio_queue_set_notification(vq, 0);
static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleAIOOutput handle_output)
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read,
                               virtio_queue_host_notifier_aio_poll);
        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                    virtio_queue_host_notifier_aio_poll_begin,
                                    virtio_queue_host_notifier_aio_poll_end);
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
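/*
 * Illustrative sketch (not from the original file): a dataplane-style
 * device moves queue processing into an IOThread's AioContext with the
 * helper above, and tears it down by passing NULL. Names are hypothetical.
 */
#if 0 /* example only */
static bool my_handle_vq(VirtIODevice *vdev, VirtQueue *vq)
{
    /* return true if any request made progress */
    return false;
}

void my_dataplane_start(VirtQueue *vq, AioContext *ctx)
{
    virtio_queue_aio_set_host_notifier_handler(vq, ctx, my_handle_vq);
}

void my_dataplane_stop(VirtQueue *vq, AioContext *ctx)
{
    virtio_queue_aio_set_host_notifier_handler(vq, ctx, NULL);
}
#endif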
void virtio_queue_host_notifier_read(EventNotifier *n)
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
    return &vq->host_notifier;
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
    error_vreport(fmt, ap);
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
        virtio_notify_config(vdev);
    vdev->broken = true;
static void virtio_memory_listener_commit(MemoryListener *listener)
    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
        virtio_init_region_cache(vdev, i);
static void virtio_device_realize(DeviceState *dev, Error **errp)
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);
    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
            error_propagate(errp, err);
    virtio_bus_device_plugged(vdev, &err);
        error_propagate(errp, err);
    vdev->listener.commit = virtio_memory_listener_commit;
    memory_listener_register(&vdev->listener, vdev->dma_as);
static void virtio_device_unrealize(DeviceState *dev, Error **errp)
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    virtio_bus_device_unplugged(vdev);
    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
            error_propagate(errp, err);
    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
static void virtio_device_free_virtqueues(VirtIODevice *vdev)
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
static void virtio_device_instance_finalize(Object *obj)
    VirtIODevice *vdev = VIRTIO_DEVICE(obj);
    memory_listener_unregister(&vdev->listener);
    virtio_device_free_virtqueues(vdev);
    g_free(vdev->config);
    g_free(vdev->vector_queues);
static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
        r = virtio_bus_set_host_notifier(qbus, n, true);
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
        event_notifier_set(&vq->host_notifier);
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
int virtio_device_start_ioeventfd(VirtIODevice *vdev)
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    return virtio_bus_start_ioeventfd(vbus);
static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    virtio_bus_stop_ioeventfd(vbus);
int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    return virtio_bus_grab_ioeventfd(vbus);
void virtio_device_release_ioeventfd(VirtIODevice *vdev)
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    virtio_bus_release_ioeventfd(vbus);
static void virtio_device_class_init(ObjectClass *klass, void *data)
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    return virtio_bus_ioeventfd_enabled(vbus);
static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .class_size = sizeof(VirtioDeviceClass),
static void virtio_register_types(void)
    type_register_static(&virtio_device_info);
type_init(virtio_register_types)
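/*
 * Illustrative sketch (not from the original file): concrete devices
 * register themselves the same way, subclassing TYPE_VIRTIO_DEVICE.
 * Everything named "my_*" below is hypothetical and refers to the earlier
 * example callbacks.
 */
#if 0 /* example only */
static void my_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = my_device_realize;
    vdc->get_features = my_get_features;
}

static const TypeInfo my_device_info = {
    .name = "my-virtio-device",
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIODevice),  /* or a larger wrapper struct */
    .class_init = my_class_init,
};
#endif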