/*
 * Inter-VM Shared Memory PCI device.
 *
 * Author:
 *      Cam Macdonell <[email protected]>
 *
 * Based On: cirrus_vga.c
 *          Copyright (c) 2004 Fabrice Bellard
 *          Copyright (c) 2004 Makoto Suzuki (suzu)
 *
 *      and rtl8139.c
 *          Copyright (c) 2006 Igor Kovalenko
 *
 * This code is licensed under the GNU GPL v2.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qom/object_interfaces.h"
#include "sysemu/char.h"
#include "sysemu/hostmem.h"
#include "qapi/visitor.h"
#include "exec/ram_addr.h"

#include "hw/misc/ivshmem.h"

#include <sys/mman.h>

#define PCI_VENDOR_ID_IVSHMEM   PCI_VENDOR_ID_REDHAT_QUMRANET
#define PCI_DEVICE_ID_IVSHMEM   0x1110

#define IVSHMEM_MAX_PEERS UINT16_MAX
#define IVSHMEM_IOEVENTFD   0
#define IVSHMEM_MSI     1

#define IVSHMEM_PEER    0
#define IVSHMEM_MASTER  1

#define IVSHMEM_REG_BAR_SIZE 0x100

#define IVSHMEM_DEBUG 0
#define IVSHMEM_DPRINTF(fmt, ...)                       \
    do {                                                \
        if (IVSHMEM_DEBUG) {                            \
            printf("IVSHMEM: " fmt, ## __VA_ARGS__);    \
        }                                               \
    } while (0)

#define TYPE_IVSHMEM "ivshmem"
#define IVSHMEM(obj) \
    OBJECT_CHECK(IVShmemState, (obj), TYPE_IVSHMEM)

typedef struct Peer {
    int nb_eventfds;
    EventNotifier *eventfds;
} Peer;

typedef struct MSIVector {
    PCIDevice *pdev;
    int virq;
} MSIVector;

typedef struct IVShmemState {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    HostMemoryBackend *hostmem;
    uint32_t intrmask;
    uint32_t intrstatus;

    CharDriverState *server_chr;
    MemoryRegion ivshmem_mmio;

    /* We might need to register the BAR before we actually have the memory.
     * So prepare a container MemoryRegion for the BAR immediately and
     * add a subregion when we have the memory.
     */
    MemoryRegion bar;
    MemoryRegion ivshmem;
    size_t ivshmem_size;        /* size of shared memory region */
    uint32_t ivshmem_64bit;

    Peer *peers;
    int nb_peers;               /* space in @peers[] */

    int vm_id;
    uint32_t vectors;
    uint32_t features;
    MSIVector *msi_vectors;
    uint64_t msg_buf;           /* buffer for receiving server messages */
    int msg_buffered_bytes;     /* #bytes in @msg_buf */

    Error *migration_blocker;

    char *shmobj;
    char *sizearg;
    char *role;
    int role_val;               /* scalar to avoid multiple string comparisons */
} IVShmemState;

/* registers for the Inter-VM shared memory device */
enum ivshmem_registers {
    INTRMASK = 0,
    INTRSTATUS = 4,
    IVPOSITION = 8,
    DOORBELL = 12,
};

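/*
 * Register semantics, as implemented by ivshmem_io_read()/ivshmem_io_write()
 * below:
 *   INTRMASK   - interrupt mask; only consulted for INTx (msi=off)
 *   INTRSTATUS - interrupt status; cleared by reading it
 *   IVPOSITION - this VM's peer ID, assigned by the server (read-only)
 *   DOORBELL   - write (peer_id << 16) | vector to interrupt another peer,
 *                e.g. (3 << 16) | 0 rings vector 0 of peer 3
 */
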
static inline uint32_t ivshmem_has_feature(IVShmemState *ivs,
                                           unsigned int feature) {
    return (ivs->features & (1 << feature));
}

static void ivshmem_update_irq(IVShmemState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t isr = s->intrstatus & s->intrmask;

    /* No INTx with msi=on, whether the guest enabled MSI-X or not */
    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        return;
    }

    /* don't print ISR resets */
    if (isr) {
        IVSHMEM_DPRINTF("Set IRQ to %d (%04x %04x)\n",
                        isr ? 1 : 0, s->intrstatus, s->intrmask);
    }

    pci_set_irq(d, isr != 0);
}

static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val)
{
    IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val);

    s->intrmask = val;
    ivshmem_update_irq(s);
}

static uint32_t ivshmem_IntrMask_read(IVShmemState *s)
{
    uint32_t ret = s->intrmask;

    IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret);
    return ret;
}

static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val)
{
    IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val);

    s->intrstatus = val;
    ivshmem_update_irq(s);
}

static uint32_t ivshmem_IntrStatus_read(IVShmemState *s)
{
    uint32_t ret = s->intrstatus;

    /* reading ISR clears all interrupts */
    s->intrstatus = 0;
    ivshmem_update_irq(s);
    return ret;
}

static void ivshmem_io_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    IVShmemState *s = opaque;

    uint16_t dest = val >> 16;
    uint16_t vector = val & 0xff;

    addr &= 0xfc;

    IVSHMEM_DPRINTF("writing to addr " TARGET_FMT_plx "\n", addr);
    switch (addr)
    {
        case INTRMASK:
            ivshmem_IntrMask_write(s, val);
            break;

        case INTRSTATUS:
            ivshmem_IntrStatus_write(s, val);
            break;

        case DOORBELL:
            /* check that dest VM ID is reasonable */
            if (dest >= s->nb_peers) {
                IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
                break;
            }

            /* check doorbell range */
            if (vector < s->peers[dest].nb_eventfds) {
                IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
                event_notifier_set(&s->peers[dest].eventfds[vector]);
            } else {
                IVSHMEM_DPRINTF("Invalid destination vector %d on VM %d\n",
                                vector, dest);
            }
            break;
        default:
            IVSHMEM_DPRINTF("Unhandled write " TARGET_FMT_plx "\n", addr);
    }
}

static uint64_t ivshmem_io_read(void *opaque, hwaddr addr,
                                unsigned size)
{

    IVShmemState *s = opaque;
    uint32_t ret;

    switch (addr)
    {
        case INTRMASK:
            ret = ivshmem_IntrMask_read(s);
            break;

        case INTRSTATUS:
            ret = ivshmem_IntrStatus_read(s);
            break;

        case IVPOSITION:
            ret = s->vm_id;
            break;

        default:
            IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr);
            ret = 0;
    }

    return ret;
}

static const MemoryRegionOps ivshmem_mmio_ops = {
    .read = ivshmem_io_read,
    .write = ivshmem_io_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void ivshmem_vector_notify(void *opaque)
{
    MSIVector *entry = opaque;
    PCIDevice *pdev = entry->pdev;
    IVShmemState *s = IVSHMEM(pdev);
    int vector = entry - s->msi_vectors;
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];

    if (!event_notifier_test_and_clear(n)) {
        return;
    }

    IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector);
    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        if (msix_enabled(pdev)) {
            msix_notify(pdev, vector);
        }
    } else {
        ivshmem_IntrStatus_write(s, 1);
    }
}

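/*
 * MSI-X vector notifier callbacks used when interrupts are delivered via
 * KVM irqfd: unmasking a vector attaches the peer's eventfd to its KVM
 * virq, masking detaches it, and the poll callback sets pending those
 * vectors whose eventfds fired while they were masked.
 */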
static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector,
                                 MSIMessage msg)
{
    IVShmemState *s = IVSHMEM(dev);
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    MSIVector *v = &s->msi_vectors[vector];
    int ret;

    IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector);

    ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev);
    if (ret < 0) {
        return ret;
    }

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq);
}

static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector)
{
    IVShmemState *s = IVSHMEM(dev);
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    int ret;

    IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector);

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n,
                                                s->msi_vectors[vector].virq);
    if (ret != 0) {
        error_report("remove_irqfd_notifier_gsi failed");
    }
}

static void ivshmem_vector_poll(PCIDevice *dev,
                                unsigned int vector_start,
                                unsigned int vector_end)
{
    IVShmemState *s = IVSHMEM(dev);
    unsigned int vector;

    IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end);

    vector_end = MIN(vector_end, s->vectors);

    for (vector = vector_start; vector < vector_end; vector++) {
        EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector];

        if (!msix_is_masked(dev, vector)) {
            continue;
        }

        if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static void watch_vector_notifier(IVShmemState *s, EventNotifier *n,
                                  int vector)
{
    int eventfd = event_notifier_get_fd(n);

    assert(!s->msi_vectors[vector].pdev);
    s->msi_vectors[vector].pdev = PCI_DEVICE(s);

    qemu_set_fd_handler(eventfd, ivshmem_vector_notify,
                        NULL, &s->msi_vectors[vector]);
}

static int check_shm_size(IVShmemState *s, int fd, Error **errp)
{
    /* check that the guest isn't going to try and map more memory than the
     * shared object has allocated; return -1 to indicate error */

    struct stat buf;

    if (fstat(fd, &buf) < 0) {
        error_setg(errp, "exiting: fstat on fd %d failed: %s",
                   fd, strerror(errno));
        return -1;
    }

    if (s->ivshmem_size > buf.st_size) {
        error_setg(errp, "Requested memory size greater"
                   " than shared object size (%zu > %" PRIu64")",
                   s->ivshmem_size, (uint64_t)buf.st_size);
        return -1;
    } else {
        return 0;
    }
}

static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
{
    memory_region_add_eventfd(&s->ivshmem_mmio,
                              DOORBELL,
                              4,
                              true,
                              (posn << 16) | i,
                              &s->peers[posn].eventfds[i]);
}

static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
{
    memory_region_del_eventfd(&s->ivshmem_mmio,
                              DOORBELL,
                              4,
                              true,
                              (posn << 16) | i,
                              &s->peers[posn].eventfds[i]);
}

static void close_peer_eventfds(IVShmemState *s, int posn)
{
    int i, n;

    assert(posn >= 0 && posn < s->nb_peers);
    n = s->peers[posn].nb_eventfds;

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        memory_region_transaction_begin();
        for (i = 0; i < n; i++) {
            ivshmem_del_eventfd(s, posn, i);
        }
        memory_region_transaction_commit();
    }

    for (i = 0; i < n; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}

static void resize_peers(IVShmemState *s, int nb_peers)
{
    int old_nb_peers = s->nb_peers;
    int i;

    assert(nb_peers > old_nb_peers);
    IVSHMEM_DPRINTF("bumping storage to %d peers\n", nb_peers);

    s->peers = g_realloc(s->peers, nb_peers * sizeof(Peer));
    s->nb_peers = nb_peers;

    for (i = old_nb_peers; i < nb_peers; i++) {
        s->peers[i].eventfds = g_new0(EventNotifier, s->vectors);
        s->peers[i].nb_eventfds = 0;
    }
}

static void ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector,
                                     Error **errp)
{
    PCIDevice *pdev = PCI_DEVICE(s);
    MSIMessage msg = msix_get_message(pdev, vector);
    int ret;

    IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector);
    assert(!s->msi_vectors[vector].pdev);

    ret = kvm_irqchip_add_msi_route(kvm_state, msg, pdev);
    if (ret < 0) {
        error_setg(errp, "kvm_irqchip_add_msi_route failed");
        return;
    }

    s->msi_vectors[vector].virq = ret;
    s->msi_vectors[vector].pdev = pdev;
}

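/*
 * A peer's eventfd can be wired up in two ways.  Without KVM irqfd support,
 * the fd is watched from QEMU's main loop and the interrupt is injected by
 * ivshmem_vector_notify().  With irqfd (and MSI-X enabled by the guest),
 * the eventfd is bound to a KVM MSI route so notifications bypass QEMU.
 */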
static void setup_interrupt(IVShmemState *s, int vector, Error **errp)
{
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
        ivshmem_has_feature(s, IVSHMEM_MSI);
    PCIDevice *pdev = PCI_DEVICE(s);
    Error *err = NULL;

    IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);

    if (!with_irqfd) {
        IVSHMEM_DPRINTF("with eventfd\n");
        watch_vector_notifier(s, n, vector);
    } else if (msix_enabled(pdev)) {
        IVSHMEM_DPRINTF("with irqfd\n");
        ivshmem_add_kvm_msi_virq(s, vector, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }

        if (!msix_is_masked(pdev, vector)) {
            kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
                                               s->msi_vectors[vector].virq);
            /* TODO handle error */
        }
    } else {
        /* it will be delayed until msix is enabled, in write_config */
        IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled\n");
    }
}

static void process_msg_shmem(IVShmemState *s, int fd, Error **errp)
{
    Error *err = NULL;
    void *ptr;

    if (memory_region_is_mapped(&s->ivshmem)) {
        error_setg(errp, "server sent unexpected shared memory message");
        close(fd);
        return;
    }

    if (check_shm_size(s, fd, &err) == -1) {
        error_propagate(errp, err);
        close(fd);
        return;
    }

    /* mmap the region and map it into BAR 2 */
    ptr = mmap(0, s->ivshmem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) {
        error_setg_errno(errp, errno, "Failed to mmap shared memory");
        close(fd);
        return;
    }
    memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
                               "ivshmem.bar2", s->ivshmem_size, ptr);
    qemu_set_ram_fd(memory_region_get_ram_addr(&s->ivshmem), fd);
    vmstate_register_ram(&s->ivshmem, DEVICE(s));
    memory_region_add_subregion(&s->bar, 0, &s->ivshmem);
}

static void process_msg_disconnect(IVShmemState *s, uint16_t posn,
                                   Error **errp)
{
    IVSHMEM_DPRINTF("posn %d has gone away\n", posn);
    if (posn >= s->nb_peers || posn == s->vm_id) {
        error_setg(errp, "invalid peer %d", posn);
        return;
    }
    close_peer_eventfds(s, posn);
}

static void process_msg_connect(IVShmemState *s, uint16_t posn, int fd,
                                Error **errp)
{
    Peer *peer = &s->peers[posn];
    int vector;

    /*
     * The N-th connect message for this peer comes with the file
     * descriptor for vector N-1.  Count messages to find the vector.
     */
    if (peer->nb_eventfds >= s->vectors) {
        error_setg(errp, "Too many eventfd received, device has %d vectors",
                   s->vectors);
        close(fd);
        return;
    }
    vector = peer->nb_eventfds++;

    IVSHMEM_DPRINTF("eventfds[%d][%d] = %d\n", posn, vector, fd);
    event_notifier_init_fd(&peer->eventfds[vector], fd);
    fcntl_setfl(fd, O_NONBLOCK); /* msix/irqfd poll non block */

    if (posn == s->vm_id) {
        setup_interrupt(s, vector, errp);
        /* TODO do we need to handle the error? */
    }

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        ivshmem_add_eventfd(s, posn, vector);
    }
}

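/*
 * Every message from the ivshmem server is a little-endian int64, optionally
 * accompanied by a file descriptor passed over the UNIX socket:
 *   msg == -1 with an fd           -> the shared memory to map into BAR 2
 *   msg == peer ID with an fd      -> an eventfd for that peer's next vector
 *   msg == peer ID without an fd   -> that peer has disconnected
 */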
static void process_msg(IVShmemState *s, int64_t msg, int fd, Error **errp)
{
    IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n", msg, fd);

    if (msg < -1 || msg > IVSHMEM_MAX_PEERS) {
        error_setg(errp, "server sent invalid message %" PRId64, msg);
        close(fd);
        return;
    }

    if (msg == -1) {
        process_msg_shmem(s, fd, errp);
        return;
    }

    if (msg >= s->nb_peers) {
        resize_peers(s, msg + 1);
    }

    if (fd >= 0) {
        process_msg_connect(s, msg, fd, errp);
    } else {
        process_msg_disconnect(s, msg, errp);
    }
}

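/*
 * The chardev can hand us the 8-byte server message in pieces;
 * ivshmem_can_receive() and ivshmem_read() below buffer partial reads in
 * s->msg_buf until a complete message has arrived.
 */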
static int ivshmem_can_receive(void *opaque)
{
    IVShmemState *s = opaque;

    assert(s->msg_buffered_bytes < sizeof(s->msg_buf));
    return sizeof(s->msg_buf) - s->msg_buffered_bytes;
}

static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    Error *err = NULL;
    int fd;
    int64_t msg;

    assert(size >= 0 && s->msg_buffered_bytes + size <= sizeof(s->msg_buf));
    memcpy((unsigned char *)&s->msg_buf + s->msg_buffered_bytes, buf, size);
    s->msg_buffered_bytes += size;
    if (s->msg_buffered_bytes < sizeof(s->msg_buf)) {
        return;
    }
    msg = le64_to_cpu(s->msg_buf);
    s->msg_buffered_bytes = 0;

    fd = qemu_chr_fe_get_msgfd(s->server_chr);
    IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n", msg, fd);

    process_msg(s, msg, fd, &err);
    if (err) {
        error_report_err(err);
    }
}

static int64_t ivshmem_recv_msg(IVShmemState *s, int *pfd, Error **errp)
{
    int64_t msg;
    int n, ret;

    n = 0;
    do {
        ret = qemu_chr_fe_read_all(s->server_chr, (uint8_t *)&msg + n,
                                   sizeof(msg) - n);
        if (ret < 0 && ret != -EINTR) {
            error_setg_errno(errp, -ret, "read from server failed");
            return INT64_MIN;
        }
        n += ret;
    } while (n < sizeof(msg));

    *pfd = qemu_chr_fe_get_msgfd(s->server_chr);
    return msg;
}

static void ivshmem_recv_setup(IVShmemState *s, Error **errp)
{
    Error *err = NULL;
    int64_t msg;
    int fd;

    msg = ivshmem_recv_msg(s, &fd, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    if (msg != IVSHMEM_PROTOCOL_VERSION) {
        error_setg(errp, "server sent version %" PRId64 ", expecting %d",
                   msg, IVSHMEM_PROTOCOL_VERSION);
        return;
    }
    if (fd != -1) {
        error_setg(errp, "server sent invalid version message");
        return;
    }

    /*
     * ivshmem-server sends the remaining initial messages in a fixed
     * order, but the device has always accepted them in any order.
     * Stay as compatible as practical, just in case people use
     * servers that behave differently.
     */

    /*
     * ivshmem_device_spec.txt has always required the ID message
     * right here, and ivshmem-server has always complied.  However,
     * older versions of the device accepted it out of order, but
     * broke when an interrupt setup message arrived before it.
     */
    msg = ivshmem_recv_msg(s, &fd, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    if (fd != -1 || msg < 0 || msg > IVSHMEM_MAX_PEERS) {
        error_setg(errp, "server sent invalid ID message");
        return;
    }
    s->vm_id = msg;

    /*
     * Receive more messages until we got shared memory.
     */
    do {
        msg = ivshmem_recv_msg(s, &fd, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
        process_msg(s, msg, fd, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    } while (msg != -1);

    /*
     * This function must either map the shared memory or fail.  The
     * loop above ensures that: it terminates normally only after it
     * successfully processed the server's shared memory message.
     * Assert that we actually mapped the shared memory:
     */
    assert(memory_region_is_mapped(&s->ivshmem));
}

/* Select the MSI-X vectors used by the device.
 * ivshmem maps events to vectors statically, so
 * we just enable all vectors on init and after reset. */
static void ivshmem_msix_vector_use(IVShmemState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int i;

    for (i = 0; i < s->vectors; i++) {
        msix_vector_use(d, i);
    }
}

static void ivshmem_reset(DeviceState *d)
{
    IVShmemState *s = IVSHMEM(d);

    s->intrstatus = 0;
    s->intrmask = 0;
    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        ivshmem_msix_vector_use(s);
    }
}

static int ivshmem_setup_interrupts(IVShmemState *s)
{
    /* allocate QEMU callback data for receiving interrupts */
    s->msi_vectors = g_malloc0(s->vectors * sizeof(MSIVector));

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1)) {
            return -1;
        }

        IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);
        ivshmem_msix_vector_use(s);
    }

    return 0;
}

static void ivshmem_enable_irqfd(IVShmemState *s)
{
    PCIDevice *pdev = PCI_DEVICE(s);
    int i;

    for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
        Error *err = NULL;

        ivshmem_add_kvm_msi_virq(s, i, &err);
        if (err) {
            error_report_err(err);
            /* TODO do we need to handle the error? */
        }
    }

    if (msix_set_vector_notifiers(pdev,
                                  ivshmem_vector_unmask,
                                  ivshmem_vector_mask,
                                  ivshmem_vector_poll)) {
        error_report("ivshmem: msix_set_vector_notifiers failed");
    }
}

static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector)
{
    IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector);

    if (s->msi_vectors[vector].pdev == NULL) {
        return;
    }

    /* it was cleaned when masked in the frontend. */
    kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq);

    s->msi_vectors[vector].pdev = NULL;
}

static void ivshmem_disable_irqfd(IVShmemState *s)
{
    PCIDevice *pdev = PCI_DEVICE(s);
    int i;

    for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
        ivshmem_remove_kvm_msi_virq(s, i);
    }

    msix_unset_vector_notifiers(pdev);
}

static void ivshmem_write_config(PCIDevice *pdev, uint32_t address,
                                 uint32_t val, int len)
{
    IVShmemState *s = IVSHMEM(pdev);
    int is_enabled, was_enabled = msix_enabled(pdev);

    pci_default_write_config(pdev, address, val, len);
    is_enabled = msix_enabled(pdev);

    if (kvm_msi_via_irqfd_enabled()) {
        if (!was_enabled && is_enabled) {
            ivshmem_enable_irqfd(s);
        } else if (was_enabled && !is_enabled) {
            ivshmem_disable_irqfd(s);
        }
    }
}

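/*
 * The legacy shm=<name> option is syntactic sugar: it is desugared into an
 * internally created memory-backend-file object backed by /dev/shm/<name>,
 * so the rest of the code only has to handle the hostmem case.
 */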
static void desugar_shm(IVShmemState *s)
{
    Object *obj;
    char *path;

    obj = object_new("memory-backend-file");
    path = g_strdup_printf("/dev/shm/%s", s->shmobj);
    object_property_set_str(obj, path, "mem-path", &error_abort);
    g_free(path);
    object_property_set_int(obj, s->ivshmem_size, "size", &error_abort);
    object_property_set_bool(obj, true, "share", &error_abort);
    object_property_add_child(OBJECT(s), "internal-shm-backend", obj,
                              &error_abort);
    user_creatable_complete(obj, &error_abort);
    s->hostmem = MEMORY_BACKEND(obj);
}

static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
{
    IVShmemState *s = IVSHMEM(dev);
    Error *err = NULL;
    uint8_t *pci_conf;
    uint8_t attr = PCI_BASE_ADDRESS_SPACE_MEMORY |
        PCI_BASE_ADDRESS_MEM_PREFETCH;

    if (!!s->server_chr + !!s->shmobj + !!s->hostmem != 1) {
        error_setg(errp,
                   "You must specify either 'shm', 'chardev' or 'x-memdev'");
        return;
    }

    if (s->hostmem) {
        MemoryRegion *mr;

        if (s->sizearg) {
            g_warning("size argument ignored with hostmem");
        }

        mr = host_memory_backend_get_memory(s->hostmem, &error_abort);
        s->ivshmem_size = memory_region_size(mr);
    } else if (s->sizearg == NULL) {
        s->ivshmem_size = 4 << 20; /* 4 MB default */
    } else {
        char *end;
        int64_t size = qemu_strtosz(s->sizearg, &end);
        if (size < 0 || (size_t)size != size || *end != '\0'
            || !is_power_of_2(size)) {
            error_setg(errp, "Invalid size %s", s->sizearg);
            return;
        }
        s->ivshmem_size = size;
    }

    /* IRQFD requires MSI */
    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
        !ivshmem_has_feature(s, IVSHMEM_MSI)) {
        error_setg(errp, "ioeventfd/irqfd requires MSI");
        return;
    }

    /* check that role is reasonable */
    if (s->role) {
        if (strncmp(s->role, "peer", 5) == 0) {
            s->role_val = IVSHMEM_PEER;
        } else if (strncmp(s->role, "master", 7) == 0) {
            s->role_val = IVSHMEM_MASTER;
        } else {
            error_setg(errp, "'role' must be 'peer' or 'master'");
            return;
        }
    } else {
        s->role_val = IVSHMEM_MASTER; /* default */
    }

    pci_conf = dev->config;
    pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;

    /*
     * Note: we don't use INTx with IVSHMEM_MSI at all, so this is a
     * bald-faced lie then.  But it's a backwards compatible lie.
     */
    pci_config_set_interrupt_pin(pci_conf, 1);

    memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s,
                          "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);

    /* region for registers */
    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &s->ivshmem_mmio);

    memory_region_init(&s->bar, OBJECT(s), "ivshmem-bar2-container",
                       s->ivshmem_size);
    if (s->ivshmem_64bit) {
        attr |= PCI_BASE_ADDRESS_MEM_TYPE_64;
    }

    if (s->shmobj) {
        desugar_shm(s);
    }

    if (s->hostmem != NULL) {
        MemoryRegion *mr;

        IVSHMEM_DPRINTF("using hostmem\n");

        mr = host_memory_backend_get_memory(MEMORY_BACKEND(s->hostmem),
                                            &error_abort);
        vmstate_register_ram(mr, DEVICE(s));
        memory_region_add_subregion(&s->bar, 0, mr);
        pci_register_bar(PCI_DEVICE(s), 2, attr, &s->bar);
    } else {
        IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
                        s->server_chr->filename);

        /* we allocate enough space for 16 peers and grow as needed */
        resize_peers(s, 16);

        pci_register_bar(dev, 2, attr, &s->bar);

        /*
         * Receive setup messages from server synchronously.
         * Older versions did it asynchronously, but that creates a
         * number of entertaining race conditions.
         */
        ivshmem_recv_setup(s, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }

        qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive,
                              ivshmem_read, NULL, s);

        if (ivshmem_setup_interrupts(s) < 0) {
            error_setg(errp, "failed to initialize interrupts");
            return;
        }
    }

    if (s->role_val == IVSHMEM_PEER) {
        error_setg(&s->migration_blocker,
                   "Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
        migrate_add_blocker(s->migration_blocker);
    }
}

static void pci_ivshmem_exit(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM(dev);
    int i;

    if (s->migration_blocker) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    if (memory_region_is_mapped(&s->ivshmem)) {
        if (!s->hostmem) {
            void *addr = memory_region_get_ram_ptr(&s->ivshmem);
            int fd;

            if (munmap(addr, s->ivshmem_size) == -1) {
                error_report("Failed to munmap shared memory %s",
                             strerror(errno));
            }

            fd = qemu_get_ram_fd(memory_region_get_ram_addr(&s->ivshmem));
            if (fd != -1) {
                close(fd);
            }
        }

        vmstate_unregister_ram(&s->ivshmem, DEVICE(dev));
        memory_region_del_subregion(&s->bar, &s->ivshmem);
    }

    if (s->peers) {
        for (i = 0; i < s->nb_peers; i++) {
            close_peer_eventfds(s, i);
        }
        g_free(s->peers);
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_uninit_exclusive_bar(dev);
    }

    g_free(s->msi_vectors);
}

static bool test_msix(void *opaque, int version_id)
{
    IVShmemState *s = opaque;

    return ivshmem_has_feature(s, IVSHMEM_MSI);
}

static bool test_no_msix(void *opaque, int version_id)
{
    return !test_msix(opaque, version_id);
}

static int ivshmem_pre_load(void *opaque)
{
    IVShmemState *s = opaque;

    if (s->role_val == IVSHMEM_PEER) {
        error_report("'peer' devices are not migratable");
        return -EINVAL;
    }

    return 0;
}

static int ivshmem_post_load(void *opaque, int version_id)
{
    IVShmemState *s = opaque;

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        ivshmem_msix_vector_use(s);
    }
    return 0;
}

static int ivshmem_load_old(QEMUFile *f, void *opaque, int version_id)
{
    IVShmemState *s = opaque;
    PCIDevice *pdev = PCI_DEVICE(s);
    int ret;

    IVSHMEM_DPRINTF("ivshmem_load_old\n");

    if (version_id != 0) {
        return -EINVAL;
    }

    if (s->role_val == IVSHMEM_PEER) {
        error_report("'peer' devices are not migratable");
        return -EINVAL;
    }

    ret = pci_device_load(pdev, f);
    if (ret) {
        return ret;
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_load(pdev, f);
        ivshmem_msix_vector_use(s);
    } else {
        s->intrstatus = qemu_get_be32(f);
        s->intrmask = qemu_get_be32(f);
    }

    return 0;
}

static const VMStateDescription ivshmem_vmsd = {
    .name = "ivshmem",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = ivshmem_pre_load,
    .post_load = ivshmem_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, IVShmemState),

        VMSTATE_MSIX_TEST(parent_obj, IVShmemState, test_msix),
        VMSTATE_UINT32_TEST(intrstatus, IVShmemState, test_no_msix),
        VMSTATE_UINT32_TEST(intrmask, IVShmemState, test_no_msix),

        VMSTATE_END_OF_LIST()
    },
    .load_state_old = ivshmem_load_old,
    .minimum_version_id_old = 0
};

static Property ivshmem_properties[] = {
    DEFINE_PROP_CHR("chardev", IVShmemState, server_chr),
    DEFINE_PROP_STRING("size", IVShmemState, sizearg),
    DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1),
    DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD,
                    false),
    DEFINE_PROP_BIT("msi", IVShmemState, features, IVSHMEM_MSI, true),
    DEFINE_PROP_STRING("shm", IVShmemState, shmobj),
    DEFINE_PROP_STRING("role", IVShmemState, role),
    DEFINE_PROP_UINT32("use64", IVShmemState, ivshmem_64bit, 1),
    DEFINE_PROP_END_OF_LIST(),
};

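/*
 * Example usage (illustrative only; the socket path, chardev ID and shm
 * object name below are made-up values).  With an ivshmem-server listening
 * on a UNIX socket:
 *
 *   qemu-system-x86_64 ... \
 *       -chardev socket,id=ivsh,path=/tmp/ivshmem_socket \
 *       -device ivshmem,chardev=ivsh,msi=on,ioeventfd=on
 *
 * or, without a server, backed directly by a POSIX shm object:
 *
 *   qemu-system-x86_64 ... -device ivshmem,shm=my_shm,size=4M
 */
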
static void ivshmem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = pci_ivshmem_realize;
    k->exit = pci_ivshmem_exit;
    k->config_write = ivshmem_write_config;
    k->vendor_id = PCI_VENDOR_ID_IVSHMEM;
    k->device_id = PCI_DEVICE_ID_IVSHMEM;
    k->class_id = PCI_CLASS_MEMORY_RAM;
    dc->reset = ivshmem_reset;
    dc->props = ivshmem_properties;
    dc->vmsd = &ivshmem_vmsd;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "Inter-VM shared memory";
}

static void ivshmem_check_memdev_is_busy(Object *obj, const char *name,
                                         Object *val, Error **errp)
{
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), &error_abort);
    if (memory_region_is_mapped(mr)) {
        char *path = object_get_canonical_path_component(val);
        error_setg(errp, "can't use already busy memdev: %s", path);
        g_free(path);
    } else {
        qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
    }
}

static void ivshmem_init(Object *obj)
{
    IVShmemState *s = IVSHMEM(obj);

    object_property_add_link(obj, "x-memdev", TYPE_MEMORY_BACKEND,
                             (Object **)&s->hostmem,
                             ivshmem_check_memdev_is_busy,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
}

static const TypeInfo ivshmem_info = {
    .name          = TYPE_IVSHMEM,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(IVShmemState),
    .instance_init = ivshmem_init,
    .class_init    = ivshmem_class_init,
};

static void ivshmem_register_types(void)
{
    type_register_static(&ivshmem_info);
}

type_init(ivshmem_register_types)