/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

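/*
 * Dirty-page log layout (a sketch; the exact constants come from vhost.h):
 * dev->log->log is an array of vhost_log_chunk_t, one bit per
 * VHOST_LOG_PAGE bytes of guest memory.  Assuming 64-bit chunks and
 * 4 KiB pages, one chunk covers VHOST_LOG_CHUNK = 64 * 4 KiB = 256 KiB,
 * so guest physical address A is tracked by bit (A / VHOST_LOG_PAGE) % 64
 * of chunk A / VHOST_LOG_CHUNK.
 */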
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
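        /* Walk the set bits low to high; e.g. log == 0b1010 marks the
         * pages at addr + 1 * VHOST_LOG_PAGE and addr + 3 * VHOST_LOG_PAGE
         * as dirty. */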
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
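/*
 * Example (for illustration): unassigning [0x4000, 0x8000) from a single
 * region [0x0, 0x10000) takes the "split" path below: the existing entry
 * is shrunk to [0x0, 0x4000) and a new entry [0x8000, 0x10000) is
 * appended at the end, with userspace_addr shifted by the same amount.
 */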
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
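/*
 * Example (for illustration): if dev->mem already holds
 * [gpa 0x0, size 0x4000, uaddr U) and we assign
 * [gpa 0x4000, size 0x4000, uaddr U + 0x4000), both the guest-physical
 * and userspace ranges are adjacent, so the two entries merge into
 * [gpa 0x0, size 0x8000, uaddr U).
 */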
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

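/*
 * The log must cover every guest-physical address the device may write:
 * all memory regions plus each vring's used ring.  E.g. a region ending
 * at guest address 0xffffffff needs 0xffffffff / VHOST_LOG_CHUNK + 1
 * chunks.
 */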
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

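/*
 * Logs are shared between devices: a single global log per kind (anonymous
 * or memfd-backed) is handed out and reference-counted, and a new one is
 * only allocated when a caller asks for a different size.
 */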
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
     * releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    assert(r >= 0);
    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

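/*
 * A memory layout change must not move or unmap a vring the backend is
 * actively using: check that every ring overlapping the changed range
 * still maps to the same host address it was started with.
 */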
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

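/*
 * Fold a memory section change into dev->mem.  The table is grown to fit
 * one more region up front, no-op changes are filtered out, and the range
 * is unassigned and (on add) reassigned so that adjacent regions merge.
 * The accumulated dirty range is pushed to the backend in vhost_commit().
 */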
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    return r < 0 ? -errno : 0;
}

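/*
 * Toggle dirty logging in two steps: first flip VHOST_F_LOG_ALL in the
 * backend features, then update every vring's address flags.  On failure
 * the already-updated vrings are rolled back to the previous state.
 */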
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    !virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r < 0) {
            error_report("failed to reset vring endianness");
        }
    }

    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type)
{
    uint64_t features;
    int i, r;

    hdev->migration_blocker = NULL;

    if (vhost_set_backend_type(hdev, backend_type) < 0) {
        close((uintptr_t)opaque);
        return -1;
    }

    if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
        close((uintptr_t)opaque);
        return -errno;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        fprintf(stderr, "vhost backend memory slots limit is less"
                " than current number of present memory slots\n");
        close((uintptr_t)opaque);
        return -1;
    }
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    hdev->vhost_ops->vhost_backend_cleanup(hdev);
    QLIST_REMOVE(hdev, entry);
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (e < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -e);
            fflush(stderr);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    assert(r >= 0);
}

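/*
 * Feature negotiation helpers.  Callers pass a VHOST_INVALID_FEATURE_BIT
 * terminated list of the bits the backend is allowed to see.  A
 * hypothetical usage sketch (array name and bits chosen for illustration):
 *
 *     static const int my_feature_bits[] = {
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VIRTIO_RING_F_EVENT_IDX,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *     features = vhost_get_features(hdev, my_feature_bits, features);
 *     ...
 *     vhost_ack_features(hdev, my_feature_bits, features);
 *
 * vhost_get_features() clears listed bits the backend lacks;
 * vhost_ack_features() records listed bits the guest acked.
 */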
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

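/*
 * Typical device lifecycle (a sketch based on how these entry points are
 * used by callers such as vhost-net, not a fixed API contract):
 *
 *     vhost_dev_init(hdev, opaque, backend_type);
 *     vhost_dev_enable_notifiers(hdev, vdev);
 *     vhost_dev_start(hdev, vdev);
 *     ...
 *     vhost_dev_stop(hdev, vdev);
 *     vhost_dev_disable_notifiers(hdev, vdev);
 *     vhost_dev_cleanup(hdev);
 */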
/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->log = NULL;
    hdev->log_size = 0;
}