/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <sys/ioctl.h>
#include "vhost.h"
#include "hw/hw.h"
/* For range_get_last() and ranges_overlap() */
#include "pci.h"
#include <linux/vhost.h>
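
/* All VHOST_* requests below are plain ioctls on dev->control, the file
 * descriptor for the vhost control device (opened from /dev/vhost-net in
 * vhost_dev_init() unless the caller passes in its own fd). */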
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);
    if (end < start) {
        return;
    }
    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * to roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            bit -= 1; /* ffs/ffsll number bits from 1 */
            cpu_physical_memory_set_dirty(addr + bit * VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
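
/* Layout note: the log is an array of vhost_log_chunk_t, one bit per
 * VHOST_LOG_PAGE page of guest memory.  Assuming 4KiB pages and 64-bit
 * chunks (the vhost.h definitions), each chunk covers
 * VHOST_LOG_CHUNK = 64 * 4KiB = 256KiB of guest memory, so a dirty
 * guest page at addr sets bit (addr / VHOST_LOG_PAGE) % 64 in chunk
 * addr / VHOST_LOG_CHUNK. */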
static int vhost_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                          target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int i;
    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
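/* The unassign cases below, schematically (x = range being removed):
 *
 *   existing region:    [==========]
 *   remove whole:     xxxxxxxxxxxxxxxx  -> region dropped
 *   overlap at end:          xxxxxxxxx  -> region shrunk
 *   overlap at start: xxxxxx            -> region shifted up
 *   in the middle:         xxxx         -> region split in two
 */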
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            assert(to >= 0);
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}
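
/* Example: a region covering guest physical [0x100000, 0x200000) backed
 * by userspace address u merges with a new range starting at guest
 * physical 0x200000 backed by u + 0x100000; adjacency must hold in both
 * the guest-physical and the userspace address space, per the check
 * above. */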
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}
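
/* Example: assuming VHOST_LOG_CHUNK covers 256KiB of guest memory
 * (64 pages of 4KiB), a guest whose highest logged address is just
 * below 1GiB needs 0x40000000 / 0x40000 = 4096 chunks, i.e. a 32KiB
 * log buffer. */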
static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;
    if (size) {
        log = qemu_mallocz(size * sizeof *log);
    } else {
        log = NULL;
    }
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    /* Flush the old log into the dirty bitmap before dropping it. */
    vhost_client_sync_dirty_bitmap(&dev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    if (dev->log) {
        qemu_free(dev->log);
    }
    dev->log = log;
    dev->log_size = size;
}
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        target_phys_addr_t l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}
static void vhost_client_set_memory(CPUPhysMemoryClient *client,
                                    target_phys_addr_t start_addr,
                                    ram_addr_t size,
                                    ram_addr_t phys_offset)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    uint64_t log_size;
    int r;
    dev->mem = qemu_realloc(dev->mem, s);

    assert(size);

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (flags == IO_MEM_RAM) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size,
                                (uintptr_t)qemu_get_ram_ptr(phys_offset));
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }

    if (!dev->started) {
        return;
    }

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to the log
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
}
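
/* The ordering above is what keeps logging safe across the update:
 * growing the log before VHOST_SET_MEM_TABLE guarantees the kernel
 * never sets a bit past the end of the buffer, and shrinking only
 * after the update guarantees the dropped tail can no longer be
 * addressed by any logged region. */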
static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    /* Roll back: restore the previous logging state on the vqs already
     * updated, then restore the feature bits. */
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}
static int vhost_client_migration_log(CPUPhysMemoryClient *client,
                                      int enable)
{
    struct vhost_dev *dev = container_of(client, struct vhost_dev, client);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        if (dev->log) {
            qemu_free(dev->log);
        }
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        /* Allocate and register the log before asking the kernel to
         * start logging into it. */
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}
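
/* This is the migration_log hook of CPUPhysMemoryClient: QEMU invokes
 * it when live migration starts and completes, so the dirty log is only
 * allocated, and VHOST_F_LOG_ALL only negotiated, while a migration is
 * actually in flight. */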
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    target_phys_addr_t s, l, a;
    int r;
    struct vhost_vring_file file = {
        .index = idx,
    };
    struct vhost_vring_state state = {
        .index = idx,
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    if (!vdev->binding->set_guest_notifier) {
        fprintf(stderr, "binding does not support guest notifiers\n");
        return -ENOSYS;
    }

    if (!vdev->binding->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        return -ENOSYS;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, idx, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }
    r = vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, true);
    if (r < 0) {
        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
        goto fail_guest_notifier;
    }

    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
    if (r < 0) {
        fprintf(stderr, "Error binding host notifier: %d\n", -r);
        goto fail_host_notifier;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }

    return 0;

fail_call:
fail_kick:
    vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
fail_host_notifier:
    vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, false);
fail_guest_notifier:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}
static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
    };
    int r;
    r = vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, false);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d guest cleanup failed: %d\n", idx, r);
        fflush(stderr);
    }
    assert(r >= 0);

    r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d host cleanup failed: %d\n", idx, r);
        fflush(stderr);
    }
    assert(r >= 0);

    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}
int vhost_dev_init(struct vhost_dev *hdev, int devfd)
{
    uint64_t features;
    int r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open("/dev/vhost-net", O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }
    hdev->features = features;

    hdev->client.set_memory = vhost_client_set_memory;
    hdev->client.sync_dirty_bitmap = vhost_client_sync_dirty_bitmap;
    hdev->client.migration_log = vhost_client_migration_log;
    hdev->mem = qemu_mallocz(offsetof(struct vhost_memory, regions));
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    cpu_register_phys_memory_client(&hdev->client);
    return 0;
fail:
    r = -errno;
    close(hdev->control);
    return r;
}
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    cpu_unregister_phys_memory_client(&hdev->client);
    qemu_free(hdev->mem);
    close(hdev->control);
}
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            qemu_mallocz(hdev->log_size * sizeof *hdev->log) : NULL;
        /* VHOST_SET_LOG_BASE takes a pointer to the base, as in
         * vhost_dev_log_resize() above. */
        log_base = (uint64_t)(unsigned long)hdev->log;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE, &log_base);
        if (r < 0) {
            r = -errno;
            goto fail_vq;
        }
    }

    hdev->started = true;

    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
fail:
    return r;
}
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev,
                                vdev,
                                hdev->vqs + i,
                                i);
    }
    vhost_client_sync_dirty_bitmap(&hdev->client, 0,
                                   (target_phys_addr_t)~0x0ull);
    hdev->started = false;
    qemu_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}
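
/* Typical call sequence, sketched (error handling omitted; a caller
 * such as vhost_net would drive this):
 *
 *   vhost_dev_init(&dev, -1);     // open /dev/vhost-net, become owner
 *   ...
 *   vhost_dev_start(&dev, vdev);  // map rings, wire up notifiers
 *   ...
 *   vhost_dev_stop(&dev, vdev);   // restore ring state, sync dirty log
 *   vhost_dev_cleanup(&dev);      // unregister client, close control fd
 */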