/*
 * Copyright Red Hat, Inc. 2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            /* ffs/ffsll return a 1-based bit index, 0 when no bit is set. */
            bit -= 1;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
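/* Editor's note, an illustrative sketch of the arithmetic above rather
 * than authoritative documentation: each vhost_log_chunk_t is one word
 * of the dirty bitmap, and bit N of the chunk that starts at guest
 * address 'addr' covers the page at addr + N * VHOST_LOG_PAGE.
 * Assuming 64-bit chunks and a 4K VHOST_LOG_PAGE, one chunk covers
 * 256K of guest memory, so a write to guest address 0x42000 sets
 * bit (0x42000 / VHOST_LOG_PAGE) % 64 == 2 in chunk
 * 0x42000 / VHOST_LOG_CHUNK == 1. */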
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}
static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
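/* Editor's note (illustrative, not from the original source): the four
 * branches above are the four ways [start_addr, start_addr + size) can
 * intersect an existing region: covering it entirely (remove),
 * overlapping its tail (shrink), overlapping its head (shift), or
 * sitting strictly inside it (split). For example, unassigning
 * 0x2000..0x2fff from a region covering 0x1000..0x3fff splits it into
 * 0x1000..0x1fff and 0x3000..0x3fff. */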
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
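/* Editor's note (illustrative): regions merge only when they are
 * adjacent or overlapping in both the guest-physical and the userspace
 * mapping, i.e. they came from one contiguous HVA range. For example,
 * a region at GPA 0x0..0xfff backed by HVA X merges with one at GPA
 * 0x1000..0x1fff backed by HVA X + 0x1000 into a single 8K region. */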
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
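/* Editor's note, a worked example under the assumption of 64-bit
 * chunks and a 4K VHOST_LOG_PAGE (so one chunk covers 256K): a single
 * region ending at guest address 0x7fffffff (2G) needs
 * 0x80000000 / 0x40000 = 8192 chunks, i.e. a 64K log buffer. The
 * result is the max over all regions and used rings because the
 * kernel logs writes to both. */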
static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;

    log = g_malloc0(size * sizeof *log);
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    /* Sync only the range covered by the old log */
    if (dev->log_size) {
        vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
    }
    g_free(dev->log);
    dev->log = log;
    dev->log_size = size;
}
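/* Editor's note on the ordering above: the kernel is pointed at the
 * new (zeroed) buffer first; the old buffer is then synced (only the
 * range it actually covered) and finally freed, so dirty bits recorded
 * before the switch are flushed to the memory API rather than lost. */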
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}
static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    if (add) {
        /* vhost_dev_assign_memory() requires that no existing region
         * overlap the range, so remove any previous mapping first. */
        vhost_dev_unassign_memory(dev, start_addr, size);
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
}
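/* Editor's note, an assumption drawn from the add = false override
 * above rather than from the original source: when some other client
 * (e.g. a tracked VGA framebuffer) has dirty logging enabled on the
 * region, the range is left out of the vhost table so the in-kernel
 * driver never writes to memory whose dirty bitmap vhost cannot
 * update; such regions are not expected to hold virtqueue buffers. */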
static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}
static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}
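/* Editor's note on the resize ordering above: growing before
 * VHOST_SET_MEM_TABLE guarantees the log already covers every address
 * the kernel may log under the new table; shrinking only afterwards
 * guarantees nothing is logged past the new end. With 8-byte chunks,
 * VHOST_LOG_BUFFER is 0x1000 / 8 = 512 chunks of slack. */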
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}
static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}
static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    /* Roll the already-updated rings back to the previous log setting. */
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        g_free(dev->log);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
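/* Editor's note: the ordering here mirrors vhost_commit. When
 * enabling, the log buffer is allocated (vhost_dev_log_resize) before
 * VHOST_F_LOG_ALL is acked via vhost_dev_set_log, so the kernel never
 * logs into a missing buffer; when disabling, logging is switched off
 * first and the buffer freed only afterwards. */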
static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}
static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
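/* Editor's note: the fail_* labels above unwind in reverse order of
 * setup, so a failure at any step unmaps exactly the ring pieces
 * mapped so far (desc, avail, used, ring) and nothing else. */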
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}
static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}
static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}
int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                   bool force)
{
    uint64_t features;
    int i, r;

    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open(devpath, O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    close(hdev->control);
    return r;
}
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}
bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);

    return !k->query_guest_notifiers ||
           k->query_guest_notifiers(qbus->parent) ||
           hdev->force;
}
/* Stop processing guest IO notifications in qemu.
 * Start processing them in the kernel's vhost driver.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}
/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}
/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;

    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}
/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}
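/* Editor's note: masking does not stop the kernel from signalling used
 * buffers. It retargets VHOST_SET_VRING_CALL at the internal
 * masked_notifier instead of the guest notifier, so events accumulate
 * there and can later be collected with vhost_virtqueue_pending(). */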
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        /* VHOST_SET_LOG_BASE takes a pointer to the base address,
         * as in vhost_dev_log_resize() above. */
        log_base = (uint64_t)(unsigned long)hdev->log;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE, &log_base);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
fail_mem:
fail_features:
    hdev->started = false;
    return r;
}
/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    /* Flush any remaining dirty pages before tearing the log down. */
    vhost_log_sync_range(hdev, 0, ~0x0ull);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}