/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

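/* Report whether every registered vhost backend could still map one more
 * memory slot: the tightest backend limit must exceed the slot count
 * currently in use. */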
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

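/* The dirty log is an array of vhost_log_chunk_t words; each set bit marks
 * one VHOST_LOG_PAGE-sized guest page as dirty.  Walk the chunks covering
 * the intersection of [mfirst, mlast] and [rfirst, rlast] and forward each
 * dirty page to the memory API. */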
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

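/* Sync one MemoryRegionSection's dirty bits, clipped to [first, last],
 * against every vhost memory region and every used ring. */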
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

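/* Removing [start_addr, start_addr + size) from an existing region can leave
 * nothing (full overlap), only the left piece, only the right piece, or two
 * pieces (a split).  The counters in vhost_dev_unassign_memory() below assert
 * that each case occurs at most once per call, which holds because tracked
 * regions never overlap. */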
/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

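/* The log must cover the highest guest-physical address vhost may write:
 * the end of every memory region and of every used ring, rounded up to
 * whole VHOST_LOG_CHUNK-sized chunks. */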
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

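/* A log lives either in ordinary heap memory or, when the backend asks for
 * a shared log, in a sealed memfd mapping so an external vhost-user process
 * can map the same pages and write dirty bits into them directly. */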
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* Inform the backend of log switching; this must be done before
     * releasing the current log, to ensure no logging is lost. */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

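/* After a memory layout change, check that each ring a started device uses
 * is still mapped at the same host address; a ring that moved or became
 * unmappable is fatal, since the backend keeps using the old mapping. */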
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    return r < 0 ? -errno : 0;
}

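/* Toggle dirty logging: update the feature bits first, then re-program every
 * vring address with the matching log flag.  On failure, already-updated
 * vrings are rolled back to the previous setting. */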
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of
 * non-cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef TARGET_IS_BIENDIAN
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
#else
    return false;
#endif
}

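/* Tell the backend which endianness a legacy vring uses (the kernel backend
 * implements this as the VHOST_SET_VRING_ENDIAN ioctl).  For example, a
 * little-endian guest on a big-endian POWER host would pass
 * is_big_endian = false here. */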
static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

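/* Bring up one virtqueue: program its size and base index into the backend,
 * map the descriptor/avail/used rings into qemu's address space, hand their
 * userspace addresses to vhost, then wire up the kick eventfd.  The fail_*
 * labels unwind whatever was mapped. */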
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    !virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r < 0) {
            error_report("failed to reset vring endianness");
        }
    }

    assert (r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

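/* Bring-up order, as implemented below: select the backend, check its
 * memslot limit, take ownership, query features, init per-queue call
 * notifiers, then register the memory listener.  vhost_dev_cleanup()
 * mirrors this teardown. */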
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type)
{
    uint64_t features;
    int i, r;

    hdev->migration_blocker = NULL;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        fprintf(stderr, "vhost backend memory slots limit is less"
                " than current number of present memory slots\n");
        close((uintptr_t)opaque);
        return -1;
    }
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
            fflush(stderr);
        }
        assert (e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert (r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    assert(r >= 0);
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

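/* Typical backend usage of the two helpers above (a sketch; this feature
 * list is illustrative, not taken from this file):
 *
 *     static const int example_feature_bits[] = {
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VIRTIO_RING_F_EVENT_IDX,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *
 *     host_features = vhost_get_features(hdev, example_feature_bits,
 *                                        host_features);
 *     ... feature negotiation with the guest ...
 *     vhost_ack_features(hdev, example_feature_bits, guest_features);
 */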
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->log = NULL;
    hdev->log_size = 0;
}