/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"
#include "trace.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

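/* Transfer the dirty bits logged by the backend for the part of the log
 * range [rfirst, rlast] that intersects the section range [mfirst, mlast]
 * into QEMU's own dirty bitmap for the section's MemoryRegion.
 */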
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

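/* The dirty log is reference counted and shared between devices: one
 * memfd-backed buffer for backends that require a shared log and one
 * plain allocation for the rest.  vhost_log_get() reuses the current
 * buffer when the requested size matches, otherwise it allocates anew.
 */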
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    if (vhost_dev_has_iommu(dev)) {
        return 0;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (vq->desc_phys == 0) {
            continue;
        }

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static bool vhost_section(MemoryRegionSection *section)
{
    bool result;
    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
                     ~(1 << DIRTY_MEMORY_MIGRATION);
    result = memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);

    /* Vhost doesn't handle any block which is doing dirty-tracking other
     * than migration; this typically fires on VGA areas.
     */
    result &= !log_dirty;

    trace_vhost_section(section->mr->name, result);
    return result;
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

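/* Called by the memory listener once a full round of section updates has
 * been collected in tmp_sections: rebuild the vhost memory region table,
 * hand it to the backend, and grow or shrink the dirty log as needed.
 */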
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    size_t regions_size;
    int r;
    int i;
    bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have
     * built the data structures.
     */
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (dev->n_mem_sections != n_old_sections) {
        changed = true;
    } else {
        /* Same size, let's check the contents */
        changed = n_old_sections && memcmp(dev->mem_sections, old_sections,
                         n_old_sections * sizeof(old_sections[0])) != 0;
    }

    trace_vhost_commit(dev->started, changed);
    if (!changed) {
        goto out;
    }

    /* Rebuild the regions list from the new sections list */
    regions_size = offsetof(struct vhost_memory, regions) +
                       dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;
    used_memslots = dev->mem->nregions;
    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;

        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size     = int128_get64(mrs->size);
        cur_vmr->userspace_addr  =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding   = 0;
    }

    if (!dev->started) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
    return;
}

/* Adds the section data to the tmp_section structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to
 * join neighbours.
 */
static void vhost_region_add_section(struct vhost_dev *dev,
                                     MemoryRegionSection *section)
{
    bool need_add = true;
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    RAMBlock *mrs_rb = section->mr->ram_block;
    size_t mrs_page = qemu_ram_pagesize(mrs_rb);

    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
                                   mrs_host);

    /* Round the section to its page size */
    /* First align the start down to a page boundary */
    uint64_t alignage = mrs_host & (mrs_page - 1);
    if (alignage) {
        mrs_host -= alignage;
        mrs_size += alignage;
        mrs_gpa  -= alignage;
    }
    /* Now align the size up to a page boundary */
    alignage = mrs_size & (mrs_page - 1);
    if (alignage) {
        mrs_size += mrs_page - alignage;
    }
    trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa, mrs_size,
                                           mrs_host);

    if (dev->n_tmp_sections) {
        /* Since we already have at least one section, let's see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                        (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
        uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
        uint64_t prev_host_start =
            (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
            prev_sec->offset_within_region;
        uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);

        if (mrs_gpa <= (prev_gpa_end + 1)) {
            /* OK, looks like overlapping/intersecting - it's possible that
             * the rounding to page sizes has made them overlap, but they
             * should match up in the same RAMBlock if they do.
             */
            if (mrs_gpa < prev_gpa_start) {
                error_report("%s:Section rounded to %"PRIx64
                             " prior to previous %"PRIx64,
                             __func__, mrs_gpa, prev_gpa_start);
                /* A way to cleanly fail here would be better */
                return;
            }
            /* Offset from the start of the previous GPA to this GPA */
            size_t offset = mrs_gpa - prev_gpa_start;

            if (prev_host_start + offset == mrs_host &&
                section->mr == prev_sec->mr &&
                (!dev->vhost_ops->vhost_backend_can_merge ||
                 dev->vhost_ops->vhost_backend_can_merge(dev,
                    mrs_host, mrs_size,
                    prev_host_start, prev_size))) {
                uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
                need_add = false;
                prev_sec->offset_within_address_space =
                    MIN(prev_gpa_start, mrs_gpa);
                prev_sec->offset_within_region =
                    MIN(prev_host_start, mrs_host) -
                    (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
                prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
                                               mrs_host));
                trace_vhost_region_add_section_merge(section->mr->name,
                                        int128_get64(prev_sec->size),
                                        prev_sec->offset_within_address_space,
                                        prev_sec->offset_within_region);
            } else {
                /* adjoining regions are fine, but overlapping ones with
                 * different blocks/offsets shouldn't happen
                 */
                if (mrs_gpa != prev_gpa_end + 1) {
                    error_report("%s: Overlapping but not coherent sections "
                                 "at %"PRIx64,
                                 __func__, mrs_gpa);
                    return;
                }
            }
        }
    }

    if (need_add) {
        ++dev->n_tmp_sections;
        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                    dev->n_tmp_sections);
        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it, making it NULL
         * means we can memcmp the list.
         */
        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
        memory_region_ref(section->mr);
    }
}

/* Used for both add and nop callbacks */
static void vhost_region_addnop(MemoryListener *listener,
                                MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }
    vhost_region_add_section(dev, section);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Fail to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

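/* Enable or disable dirty logging (called when migration starts or stops).
 * For a started device this updates the negotiated features and every
 * vring address so the backend begins or ceases writing to the log.
 */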
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

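/* Resolve an IOTLB miss reported by the backend: translate the IOVA
 * through the device's DMA address space and push the resulting
 * userspace mapping back to the backend as an IOTLB update.
 */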
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Fail to lookup the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Fail to update device iotlb");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}

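/* Hand one virtqueue over to the backend: program the ring size, last
 * avail index and (for legacy cross-endian guests) the ring endianness,
 * map the descriptor/avail/used rings, set their addresses and wire up
 * the kick eventfd.
 */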
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Queue might not be ready for start */
        return 0;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

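/* Take one virtqueue back from the backend: fetch the last avail index
 * so virtio can resume where vhost stopped, restore native endianness
 * for legacy cross-endian guests and unmap the rings.
 */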
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;
    int a;

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Don't stop the virtqueue which might have not been started */
        return;
    }

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

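/* One-time initialisation of a vhost device: set up the backend, query
 * its features, initialise the virtqueues and register the memory
 * listener that keeps the backend's view of guest memory up to date.
 */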
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_addnop,
        .region_nop = vhost_region_addnop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                " than current number of present memory slots");
        r = -1;
        if (busyloop_timeout) {
            goto fail_busyloop;
        } else {
            goto fail;
        }
    }

    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

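/* Undo vhost_dev_init(); safe to call on a partially initialised device. */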
void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
        }
        assert (e >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert (r >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}

int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}

void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    hdev->config_ops = ops;
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly,
         * vhost-kernel code requires for this.*/
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}