/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

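/*
 * Usage note (editorial): callers pass a message, optionally with format
 * arguments; errno from the failed backend call supplies the detail, e.g.
 *     VHOST_OPS_DEBUG("vhost_set_owner failed");
 * expands (with _VHOST_DEBUG set) to roughly
 *     error_report("vhost_set_owner failed" ": %s (%d)", strerror(errno), errno);
 */
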
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

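/*
 * Dirty log layout (editorial sketch): the log is an array of
 * vhost_log_chunk_t words with one bit per VHOST_LOG_PAGE of guest memory,
 * so each chunk covers VHOST_LOG_CHUNK bytes.  Assuming the usual 4K page
 * and 64-bit chunk, a dirty page at guest address 0x51000 is bit
 * (0x51000 / 0x1000) % 64 = 17 of chunk 0x51000 / 0x40000 = 1.
 */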
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

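/*
 * Overlap cases handled above (editorial diagram; r = existing region,
 * u = range being unassigned):
 *
 *   remove whole:  u: [----------]     shrink:  u:       [------]
 *                  r:   [------]       r:   [------]
 *
 *   shift:         u: [------]         split:  u:     [--]
 *                  r:     [------]     r:   [----------]
 */
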
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

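/*
 * Note (editorial): a candidate range is merged only when it is adjacent
 * to an existing region in BOTH guest-physical and userspace addresses,
 * e.g. GPA [0x0, 0xfff] at uaddr 0x7f0000000000 and GPA [0x1000, 0x1fff]
 * at uaddr 0x7f0000001000 collapse into one slot; this keeps the region
 * array, and thus the backend's memslot usage, small.
 */
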
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

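/*
 * Worked example (editorial): with 4K pages and 64-bit chunks each chunk
 * covers 0x40000 bytes, so a guest whose highest region ends at 4G
 * (0x100000000) needs 0x100000000 / 0x40000 = 0x4000 chunks, i.e. a
 * 128KiB log buffer.
 */
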
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

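/*
 * Lifecycle note (editorial): vhost_log_get()/vhost_log_put() refcount one
 * global log per flavour (vhost_log for malloc'd, vhost_log_shm for
 * memfd-backed), so multiple vhost devices can share a single buffer; the
 * final put optionally syncs the range the old log covered back into the
 * memory API before freeing it.
 */
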
static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            error_report("Unable to map ring buffer for ring %d", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            error_report("Ring buffer relocated for ring %d", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

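/*
 * Ordering note (editorial): the backend may start logging into the buffer
 * as soon as the new memory table is installed, so a grow must reach it
 * before vhost_set_mem_table() and a shrink only after; otherwise dirty
 * bits could be written past the end of the active log.
 */
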
static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

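/*
 * Editorial summary: a vring endianness fixup is only ever needed for a
 * legacy (pre-VIRTIO_F_VERSION_1) device whose byte order differs from
 * the host's, e.g. a big-endian guest device on a little-endian host;
 * modern devices are always little-endian, so they never need it.
 */
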
static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

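/*
 * Sequence note (editorial): queue start is ordered as
 *   set_vring_num -> set_vring_base -> (optional legacy endian fixup) ->
 *   map desc/avail/used/ring -> set_vring_addr -> set_vring_kick,
 * and the fail_* labels unwind the mappings in exactly the reverse order.
 */
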
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect it by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;

    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;

    if (!k->ioeventfd_started) {
        error_report("binding does not support host notifiers");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
}

1226
f56a1247
MT
1227/* Test and clear event pending status.
1228 * Should be called after unmask to avoid losing events.
1229 */
1230bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1231{
a9f98bb5 1232 struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
a9f98bb5 1233 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
f56a1247
MT
1234 return event_notifier_test_and_clear(&vq->masked_notifier);
1235}
1236
1237/* Mask/unmask events from this vq. */
1238void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1239 bool mask)
1240{
1241 struct VirtQueue *vvq = virtio_get_queue(vdev, n);
a9f98bb5 1242 int r, index = n - hdev->vq_index;
fc57fd99 1243 struct vhost_vring_file file;
f56a1247 1244
8695de0f
MAL
1245 /* should only be called after backend is connected */
1246 assert(hdev->vhost_ops);
1247
f56a1247 1248 if (mask) {
5669655a 1249 assert(vdev->use_guest_notifier_mask);
a9f98bb5 1250 file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
f56a1247
MT
1251 } else {
1252 file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1253 }
fc57fd99 1254
21e70425
MAL
1255 file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1256 r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
162bba7f
MAL
1257 if (r < 0) {
1258 VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1259 }
f56a1247
MT
1260}
1261
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

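/*
 * Example (editorial): if feature_bits lists VIRTIO_NET_F_MRG_RXBUF but
 * hdev->features (what the backend supports) lacks that bit,
 * vhost_get_features() clears it from the set offered to the guest;
 * vhost_ack_features() is the inverse direction, recording in
 * acked_features only the listed bits the guest actually accepted.
 */
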
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
}