/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif
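
/*
 * Illustrative note: with _VHOST_DEBUG defined, a call such as
 * VHOST_OPS_DEBUG("vhost_set_owner failed") expands (via string literal
 * concatenation) to
 * error_report("vhost_set_owner failed: %s (%d)", strerror(errno), errno),
 * so every debug message carries the errno left behind by the failing
 * backend operation.
 */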

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

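/*
 * A worked example of the dirty-log layout handled below (assuming the
 * usual definitions of VHOST_LOG_PAGE = 0x1000 and a 64-bit
 * vhost_log_chunk_t, so one chunk covers 64 * 0x1000 = 0x40000 bytes of
 * guest memory): a write dirtying guest physical address 0x42345 lands
 * in page 0x42, which is chunk 0x42345 / 0x40000 = 1, bit
 * 0x42 - 0x40 = 2.  vhost_dev_sync_region() scans each non-zero chunk
 * with ctzl() and forwards the dirty pages to QEMU's own bitmap via
 * memory_region_set_dirty().
 */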
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
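/*
 * Illustrative sketch of the cases handled below, for an existing
 * region [A, B] and an unassign request [S, E]:
 *
 *   S <= A && E >= B   ->  remove the whole region
 *   S >  A && E >= B   ->  shrink: region becomes [A, S-1]
 *   S <= A && E <  B   ->  shift:  region becomes [E+1, B]
 *   S >  A && E <  B   ->  split:  regions become [A, S-1] and [E+1, B]
 *
 * Only one split can occur per call because the tracked regions never
 * overlap each other.
 */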
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
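/*
 * Illustrative sketch: assigning GPA [0x100000, 0x1fffff] at userspace
 * address U, when [0x200000, 0x2fffff] is already mapped at
 * U + 0x100000, produces one merged region [0x100000, 0x2fffff] at U,
 * because the two ranges are adjacent in both guest-physical and
 * userspace addresses.  A region is only merged when both address
 * spaces line up (and the backend's vhost_backend_can_merge hook, if
 * any, agrees).
 */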
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

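/*
 * A worked example of the computation below (assuming
 * VHOST_LOG_CHUNK = 0x40000, i.e. 64 pages of 0x1000 bytes per chunk):
 * a single memory region covering [0, 0x100000) has its last byte at
 * 0xfffff, so it needs 0xfffff / 0x40000 + 1 = 4 chunks.  The log must
 * also cover every used ring, hence the second loop.
 */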
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

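/*
 * Note on sharing (descriptive, not an upstream comment): the file-scope
 * vhost_log and vhost_log_shm pointers cache one anonymous and one
 * memfd-backed log respectively.  vhost_log_get() hands out the cached
 * log with an incremented refcnt whenever the requested size matches, so
 * several vhost devices can dirty-track into the same buffer;
 * vhost_log_put() frees it once the last reference is dropped.
 */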
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
                                          void *part,
                                          uint64_t part_addr,
                                          uint64_t part_size,
                                          uint64_t start_addr,
                                          uint64_t size)
{
    hwaddr l;
    void *p;
    int r = 0;

    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
        return 0;
    }
    l = part_size;
    p = vhost_memory_map(dev, part_addr, &l, 1);
    if (!p || l != part_size) {
        r = -ENOMEM;
    }
    if (p != part) {
        r = -EBUSY;
    }
    vhost_memory_unmap(dev, p, l, 0, 0);
    return r;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        j = 0;
        r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    /* Remove old mapping for this memory, if any. */
    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
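    /*
     * Descriptive note: the slack above gives the log size some
     * hysteresis.  Growing happens before vhost_set_mem_table() so no
     * write can be logged out of range, while shrinking is deferred
     * until after the table update and only triggers once the required
     * size falls below the allocated size by more than VHOST_LOG_BUFFER,
     * so small layout changes do not cause a reallocation on every
     * commit.
     */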
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
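/*
 * Illustrative summary of the check below: a VIRTIO 1.0 (modern) device
 * is always little-endian, so no fixup is needed.  For a legacy device,
 * a fixup is only needed when the guest's vring endianness differs from
 * the host's, e.g. a big-endian guest (device_endian == BIG) on a
 * little-endian host.
 */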
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

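/*
 * A worked example of the mask arithmetic in the IOTLB miss path below
 * (assuming a 4KiB translation, i.e. addr_mask == 0xfff): a miss on
 * iova 0x12345 yields a translation for the whole page, so the entry
 * pushed to the backend starts at iova & ~0xfff = 0x12000 and is
 * MIN(0x1000, bytes left in the memory region) long.
 */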
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Failed to update device iotlb");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}

static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness
     * back to native, as legacy devices expect it that way by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

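/*
 * A minimal usage sketch of the public API in this file (illustrative
 * only; error handling, device wiring and the backend fd/opaque are
 * assumed to be set up by the caller, as e.g. vhost_net does):
 *
 *     struct vhost_dev hdev = { .nvqs = 2, .vqs = vqs, .vq_index = 0 };
 *     vhost_dev_init(&hdev, opaque, VHOST_BACKEND_TYPE_KERNEL, 0);
 *     vhost_dev_enable_notifiers(&hdev, vdev);
 *     vhost_dev_start(&hdev, vdev);
 *     ...
 *     vhost_dev_stop(&hdev, vdev);
 *     vhost_dev_disable_notifiers(&hdev, vdev);
 *     vhost_dev_cleanup(&hdev);
 */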
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
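/*
 * Descriptive note: masking does not drop interrupts, it reroutes them.
 * The backend's call eventfd is pointed at the per-queue masked_notifier
 * instead of the guest notifier, so events accumulate there silently;
 * vhost_virtqueue_pending() above then tests and clears that notifier
 * after unmasking, so no event is lost.
 */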
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

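/*
 * Illustrative example for the two helpers below: with
 * feature_bits = { VIRTIO_F_VERSION_1, VHOST_INVALID_FEATURE_BIT },
 * vhost_get_features() clears VIRTIO_F_VERSION_1 from the passed-in
 * feature set when the backend did not report it, and
 * vhost_ack_features() records the guest-acknowledged bits into
 * hdev->acked_features so they are replayed on every (re)start.
 */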
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}

int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}

void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    assert(hdev->vhost_ops);
    hdev->config_ops = ops;
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}