Commit | Line | Data |
---|---|---|
65501a74 AW |
1 | /* |
2 | * vfio based device assignment support | |
3 | * | |
4 | * Copyright Red Hat, Inc. 2012 | |
5 | * | |
6 | * Authors: | |
7 | * Alex Williamson <[email protected]> | |
8 | * | |
9 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
10 | * the COPYING file in the top-level directory. | |
11 | * | |
12 | * Based on qemu-kvm device-assignment: | |
13 | * Adapted for KVM by Qumranet. | |
14 | * Copyright (c) 2007, Neocleus, Alex Novik ([email protected]) | |
15 | * Copyright (c) 2007, Neocleus, Guy Zana ([email protected]) | |
16 | * Copyright (C) 2008, Qumranet, Amit Shah ([email protected]) | |
17 | * Copyright (C) 2008, Red Hat, Amit Shah ([email protected]) | |
18 | * Copyright (C) 2008, IBM, Muli Ben-Yehuda ([email protected]) | |
19 | */ | |
20 | ||
6dcfdbad | 21 | #include <linux/vfio.h> |
65501a74 AW |
22 | #include <sys/ioctl.h> |
23 | #include <sys/mman.h> | |
24 | #include <sys/stat.h> | |
25 | #include <sys/types.h> | |
6dcfdbad | 26 | #include <unistd.h> |
65501a74 AW |
27 | |
28 | #include "config.h" | |
83c9f4ca PB |
29 | #include "hw/pci/msi.h" |
30 | #include "hw/pci/msix.h" | |
1de7afc9 | 31 | #include "qemu/error-report.h" |
1de7afc9 | 32 | #include "qemu/range.h" |
6dcfdbad AW |
33 | #include "sysemu/kvm.h" |
34 | #include "sysemu/sysemu.h" | |
78f33d2b | 35 | #include "pci.h" |
385f57cf | 36 | #include "trace.h" |
4b943029 | 37 | |
65501a74 AW |
38 | #define MSIX_CAP_LENGTH 12 |
39 | ||
9ee27d73 | 40 | static void vfio_disable_interrupts(VFIOPCIDevice *vdev); |
9ee27d73 | 41 | static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled); |
65501a74 | 42 | |
ea486926 AW |
43 | /* |
44 | * Disabling BAR mmapping can be slow, but toggling it around INTx can | |
45 | * also be a huge overhead. We try to get the best of both worlds by | |
46 | * waiting until an interrupt to disable mmaps (subsequent transitions | |
47 | * to the same state are effectively no overhead). If the interrupt has | |
48 | * been serviced and the time gap is long enough, we re-enable mmaps for | |
49 | * performance. This works well for things like graphics cards, which | |
50 | * may not use their interrupt at all and are penalized to an unusable | |
51 | * level by read/write BAR traps. Other devices, like NICs, have more | |
52 | * regular interrupts and see much better latency by staying in non-mmap | |
53 | * mode. We therefore set the default mmap_timeout such that a ping | |
54 | * is just enough to keep the mmap disabled. Users can experiment with | |
55 | * other options with the x-intx-mmap-timeout-ms parameter (a value of | |
56 | * zero disables the timer). | |
57 | */ | |
58 | static void vfio_intx_mmap_enable(void *opaque) | |
59 | { | |
9ee27d73 | 60 | VFIOPCIDevice *vdev = opaque; |
ea486926 AW |
61 | |
62 | if (vdev->intx.pending) { | |
bc72ad67 AB |
63 | timer_mod(vdev->intx.mmap_timer, |
64 | qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout); | |
ea486926 AW |
65 | return; |
66 | } | |
67 | ||
68 | vfio_mmap_set_enabled(vdev, true); | |
69 | } | |
70 | ||
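/*
 * Editor's example (not in the original source): the timeout described above
 * is tuned per device on the QEMU command line, e.g. something like
 *
 *   -device vfio-pci,host=0000:01:00.0,x-intx-mmap-timeout-ms=0
 *
 * where the host address is purely illustrative and, per the comment above,
 * a value of zero disables the re-enable timer.
 */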
65501a74 AW |
71 | static void vfio_intx_interrupt(void *opaque) |
72 | { | |
9ee27d73 | 73 | VFIOPCIDevice *vdev = opaque; |
65501a74 AW |
74 | |
75 | if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) { | |
76 | return; | |
77 | } | |
78 | ||
df92ee44 | 79 | trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin); |
65501a74 AW |
80 | |
81 | vdev->intx.pending = true; | |
68919cac | 82 | pci_irq_assert(&vdev->pdev); |
ea486926 AW |
83 | vfio_mmap_set_enabled(vdev, false); |
84 | if (vdev->intx.mmap_timeout) { | |
bc72ad67 AB |
85 | timer_mod(vdev->intx.mmap_timer, |
86 | qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout); | |
ea486926 | 87 | } |
65501a74 AW |
88 | } |
89 | ||
870cb6f1 | 90 | static void vfio_intx_eoi(VFIODevice *vbasedev) |
65501a74 | 91 | { |
a664477d EA |
92 | VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); |
93 | ||
65501a74 AW |
94 | if (!vdev->intx.pending) { |
95 | return; | |
96 | } | |
97 | ||
870cb6f1 | 98 | trace_vfio_intx_eoi(vbasedev->name); |
65501a74 AW |
99 | |
100 | vdev->intx.pending = false; | |
68919cac | 101 | pci_irq_deassert(&vdev->pdev); |
a664477d | 102 | vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX); |
65501a74 AW |
103 | } |
104 | ||
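/*
 * Summary note (added for clarity): in the KVM-accelerated INTx path below,
 * the intx.interrupt eventfd is registered with KVM as a resampling irqfd,
 * so a VFIO interrupt is injected into the guest without going through QEMU.
 * The intx.unmask eventfd serves double duty: KVM signals it on guest EOI
 * (irqfd.resamplefd) and VFIO listens on it via VFIO_IRQ_SET_ACTION_UNMASK
 * to re-enable the host interrupt.
 */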
870cb6f1 | 105 | static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev) |
e1d1e586 AW |
106 | { |
107 | #ifdef CONFIG_KVM | |
108 | struct kvm_irqfd irqfd = { | |
109 | .fd = event_notifier_get_fd(&vdev->intx.interrupt), | |
110 | .gsi = vdev->intx.route.irq, | |
111 | .flags = KVM_IRQFD_FLAG_RESAMPLE, | |
112 | }; | |
113 | struct vfio_irq_set *irq_set; | |
114 | int ret, argsz; | |
115 | int32_t *pfd; | |
116 | ||
46746dba | 117 | if (vdev->no_kvm_intx || !kvm_irqfds_enabled() || |
e1d1e586 | 118 | vdev->intx.route.mode != PCI_INTX_ENABLED || |
9fc0e2d8 | 119 | !kvm_resamplefds_enabled()) { |
e1d1e586 AW |
120 | return; |
121 | } | |
122 | ||
123 | /* Get to a known interrupt state */ | |
124 | qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev); | |
5546a621 | 125 | vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); |
e1d1e586 | 126 | vdev->intx.pending = false; |
68919cac | 127 | pci_irq_deassert(&vdev->pdev); |
e1d1e586 AW |
128 | |
129 | /* Get an eventfd for resample/unmask */ | |
130 | if (event_notifier_init(&vdev->intx.unmask, 0)) { | |
312fd5f2 | 131 | error_report("vfio: Error: event_notifier_init failed eoi"); |
e1d1e586 AW |
132 | goto fail; |
133 | } | |
134 | ||
135 | /* KVM triggers it, VFIO listens for it */ | |
136 | irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask); | |
137 | ||
138 | if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) { | |
312fd5f2 | 139 | error_report("vfio: Error: Failed to setup resample irqfd: %m"); |
e1d1e586 AW |
140 | goto fail_irqfd; |
141 | } | |
142 | ||
143 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
144 | ||
145 | irq_set = g_malloc0(argsz); | |
146 | irq_set->argsz = argsz; | |
147 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK; | |
148 | irq_set->index = VFIO_PCI_INTX_IRQ_INDEX; | |
149 | irq_set->start = 0; | |
150 | irq_set->count = 1; | |
151 | pfd = (int32_t *)&irq_set->data; | |
152 | ||
153 | *pfd = irqfd.resamplefd; | |
154 | ||
5546a621 | 155 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); |
e1d1e586 AW |
156 | g_free(irq_set); |
157 | if (ret) { | |
312fd5f2 | 158 | error_report("vfio: Error: Failed to setup INTx unmask fd: %m"); |
e1d1e586 AW |
159 | goto fail_vfio; |
160 | } | |
161 | ||
162 | /* Let'em rip */ | |
5546a621 | 163 | vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); |
e1d1e586 AW |
164 | |
165 | vdev->intx.kvm_accel = true; | |
166 | ||
870cb6f1 | 167 | trace_vfio_intx_enable_kvm(vdev->vbasedev.name); |
e1d1e586 AW |
168 | |
169 | return; | |
170 | ||
171 | fail_vfio: | |
172 | irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN; | |
173 | kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd); | |
174 | fail_irqfd: | |
175 | event_notifier_cleanup(&vdev->intx.unmask); | |
176 | fail: | |
177 | qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev); | |
5546a621 | 178 | vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); |
e1d1e586 AW |
179 | #endif |
180 | } | |
181 | ||
870cb6f1 | 182 | static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev) |
e1d1e586 AW |
183 | { |
184 | #ifdef CONFIG_KVM | |
185 | struct kvm_irqfd irqfd = { | |
186 | .fd = event_notifier_get_fd(&vdev->intx.interrupt), | |
187 | .gsi = vdev->intx.route.irq, | |
188 | .flags = KVM_IRQFD_FLAG_DEASSIGN, | |
189 | }; | |
190 | ||
191 | if (!vdev->intx.kvm_accel) { | |
192 | return; | |
193 | } | |
194 | ||
195 | /* | |
196 | * Get to a known state, hardware masked, QEMU ready to accept new | |
197 | * interrupts, QEMU IRQ de-asserted. | |
198 | */ | |
5546a621 | 199 | vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); |
e1d1e586 | 200 | vdev->intx.pending = false; |
68919cac | 201 | pci_irq_deassert(&vdev->pdev); |
e1d1e586 AW |
202 | |
203 | /* Tell KVM to stop listening for an INTx irqfd */ | |
204 | if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) { | |
312fd5f2 | 205 | error_report("vfio: Error: Failed to disable INTx irqfd: %m"); |
e1d1e586 AW |
206 | } |
207 | ||
208 | /* We only need to close the eventfd for VFIO to cleanup the kernel side */ | |
209 | event_notifier_cleanup(&vdev->intx.unmask); | |
210 | ||
211 | /* QEMU starts listening for interrupt events. */ | |
212 | qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev); | |
213 | ||
214 | vdev->intx.kvm_accel = false; | |
215 | ||
216 | /* If we've missed an event, let it re-fire through QEMU */ | |
5546a621 | 217 | vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); |
e1d1e586 | 218 | |
870cb6f1 | 219 | trace_vfio_intx_disable_kvm(vdev->vbasedev.name); |
e1d1e586 AW |
220 | #endif |
221 | } | |
222 | ||
870cb6f1 | 223 | static void vfio_intx_update(PCIDevice *pdev) |
e1d1e586 | 224 | { |
9ee27d73 | 225 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); |
e1d1e586 AW |
226 | PCIINTxRoute route; |
227 | ||
228 | if (vdev->interrupt != VFIO_INT_INTx) { | |
229 | return; | |
230 | } | |
231 | ||
232 | route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin); | |
233 | ||
234 | if (!pci_intx_route_changed(&vdev->intx.route, &route)) { | |
235 | return; /* Nothing changed */ | |
236 | } | |
237 | ||
870cb6f1 AW |
238 | trace_vfio_intx_update(vdev->vbasedev.name, |
239 | vdev->intx.route.irq, route.irq); | |
e1d1e586 | 240 | |
870cb6f1 | 241 | vfio_intx_disable_kvm(vdev); |
e1d1e586 AW |
242 | |
243 | vdev->intx.route = route; | |
244 | ||
245 | if (route.mode != PCI_INTX_ENABLED) { | |
246 | return; | |
247 | } | |
248 | ||
870cb6f1 | 249 | vfio_intx_enable_kvm(vdev); |
e1d1e586 AW |
250 | |
251 | /* Re-enable the interrupt in case we missed an EOI */ | |
870cb6f1 | 252 | vfio_intx_eoi(&vdev->vbasedev); |
e1d1e586 AW |
253 | } |
254 | ||
870cb6f1 | 255 | static int vfio_intx_enable(VFIOPCIDevice *vdev) |
65501a74 | 256 | { |
65501a74 | 257 | uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1); |
1a403133 AW |
258 | int ret, argsz; |
259 | struct vfio_irq_set *irq_set; | |
260 | int32_t *pfd; | |
65501a74 | 261 | |
ea486926 | 262 | if (!pin) { |
65501a74 AW |
263 | return 0; |
264 | } | |
265 | ||
266 | vfio_disable_interrupts(vdev); | |
267 | ||
268 | vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */ | |
68919cac | 269 | pci_config_set_interrupt_pin(vdev->pdev.config, pin); |
e1d1e586 AW |
270 | |
271 | #ifdef CONFIG_KVM | |
272 | /* | |
273 | * Only conditional to avoid generating error messages on platforms | |
274 | * where we won't actually use the result anyway. | |
275 | */ | |
9fc0e2d8 | 276 | if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) { |
e1d1e586 AW |
277 | vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev, |
278 | vdev->intx.pin); | |
279 | } | |
280 | #endif | |
281 | ||
65501a74 AW |
282 | ret = event_notifier_init(&vdev->intx.interrupt, 0); |
283 | if (ret) { | |
312fd5f2 | 284 | error_report("vfio: Error: event_notifier_init failed"); |
65501a74 AW |
285 | return ret; |
286 | } | |
287 | ||
1a403133 AW |
288 | argsz = sizeof(*irq_set) + sizeof(*pfd); |
289 | ||
290 | irq_set = g_malloc0(argsz); | |
291 | irq_set->argsz = argsz; | |
292 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; | |
293 | irq_set->index = VFIO_PCI_INTX_IRQ_INDEX; | |
294 | irq_set->start = 0; | |
295 | irq_set->count = 1; | |
296 | pfd = (int32_t *)&irq_set->data; | |
297 | ||
298 | *pfd = event_notifier_get_fd(&vdev->intx.interrupt); | |
299 | qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev); | |
65501a74 | 300 | |
5546a621 | 301 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); |
1a403133 AW |
302 | g_free(irq_set); |
303 | if (ret) { | |
312fd5f2 | 304 | error_report("vfio: Error: Failed to setup INTx fd: %m"); |
1a403133 | 305 | qemu_set_fd_handler(*pfd, NULL, NULL, vdev); |
ce59af2d | 306 | event_notifier_cleanup(&vdev->intx.interrupt); |
65501a74 AW |
307 | return -errno; |
308 | } | |
309 | ||
870cb6f1 | 310 | vfio_intx_enable_kvm(vdev); |
e1d1e586 | 311 | |
65501a74 AW |
312 | vdev->interrupt = VFIO_INT_INTx; |
313 | ||
870cb6f1 | 314 | trace_vfio_intx_enable(vdev->vbasedev.name); |
65501a74 AW |
315 | |
316 | return 0; | |
317 | } | |
318 | ||
870cb6f1 | 319 | static void vfio_intx_disable(VFIOPCIDevice *vdev) |
65501a74 AW |
320 | { |
321 | int fd; | |
322 | ||
bc72ad67 | 323 | timer_del(vdev->intx.mmap_timer); |
870cb6f1 | 324 | vfio_intx_disable_kvm(vdev); |
5546a621 | 325 | vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); |
65501a74 | 326 | vdev->intx.pending = false; |
68919cac | 327 | pci_irq_deassert(&vdev->pdev); |
65501a74 AW |
328 | vfio_mmap_set_enabled(vdev, true); |
329 | ||
330 | fd = event_notifier_get_fd(&vdev->intx.interrupt); | |
331 | qemu_set_fd_handler(fd, NULL, NULL, vdev); | |
332 | event_notifier_cleanup(&vdev->intx.interrupt); | |
333 | ||
334 | vdev->interrupt = VFIO_INT_NONE; | |
335 | ||
870cb6f1 | 336 | trace_vfio_intx_disable(vdev->vbasedev.name); |
65501a74 AW |
337 | } |
338 | ||
339 | /* | |
340 | * MSI/X | |
341 | */ | |
342 | static void vfio_msi_interrupt(void *opaque) | |
343 | { | |
344 | VFIOMSIVector *vector = opaque; | |
9ee27d73 | 345 | VFIOPCIDevice *vdev = vector->vdev; |
0de70dc7 AW |
346 | MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector); |
347 | void (*notify)(PCIDevice *dev, unsigned vector); | |
348 | MSIMessage msg; | |
65501a74 AW |
349 | int nr = vector - vdev->msi_vectors; |
350 | ||
351 | if (!event_notifier_test_and_clear(&vector->interrupt)) { | |
352 | return; | |
353 | } | |
354 | ||
b3ebc10c | 355 | if (vdev->interrupt == VFIO_INT_MSIX) { |
0de70dc7 AW |
356 | get_msg = msix_get_message; |
357 | notify = msix_notify; | |
9035f8c0 | 358 | } else if (vdev->interrupt == VFIO_INT_MSI) { |
0de70dc7 AW |
359 | get_msg = msi_get_message; |
360 | notify = msi_notify; | |
b3ebc10c AW |
361 | } else { |
362 | abort(); | |
363 | } | |
364 | ||
0de70dc7 | 365 | msg = get_msg(&vdev->pdev, nr); |
bc5baffa | 366 | trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data); |
0de70dc7 | 367 | notify(&vdev->pdev, nr); |
65501a74 AW |
368 | } |
369 | ||
9ee27d73 | 370 | static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix) |
65501a74 AW |
371 | { |
372 | struct vfio_irq_set *irq_set; | |
373 | int ret = 0, i, argsz; | |
374 | int32_t *fds; | |
375 | ||
376 | argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds)); | |
377 | ||
378 | irq_set = g_malloc0(argsz); | |
379 | irq_set->argsz = argsz; | |
380 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; | |
381 | irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX; | |
382 | irq_set->start = 0; | |
383 | irq_set->count = vdev->nr_vectors; | |
384 | fds = (int32_t *)&irq_set->data; | |
385 | ||
386 | for (i = 0; i < vdev->nr_vectors; i++) { | |
c048be5c AW |
387 | int fd = -1; |
388 | ||
389 | /* | |
390 | * MSI vs MSI-X - The guest has direct access to MSI mask and pending | |
391 | * bits, therefore we always use the KVM signaling path when set up. | |
392 | * MSI-X mask and pending bits are emulated, so we want to use the | |
393 | * KVM signaling path only when configured and unmasked. | |
394 | */ | |
395 | if (vdev->msi_vectors[i].use) { | |
396 | if (vdev->msi_vectors[i].virq < 0 || | |
397 | (msix && msix_is_masked(&vdev->pdev, i))) { | |
398 | fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt); | |
399 | } else { | |
400 | fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt); | |
401 | } | |
65501a74 | 402 | } |
c048be5c AW |
403 | |
404 | fds[i] = fd; | |
65501a74 AW |
405 | } |
406 | ||
5546a621 | 407 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); |
65501a74 AW |
408 | |
409 | g_free(irq_set); | |
410 | ||
65501a74 AW |
411 | return ret; |
412 | } | |
413 | ||
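/*
 * Editor's sketch (not part of the original file): a minimal illustration of
 * the variable-length vfio_irq_set argument built for VFIO_DEVICE_SET_IRQS
 * throughout this file.  The data[] tail carries one int32_t eventfd per
 * vector; an fd of -1 leaves that vector without a signaling eventfd.  The
 * function name and its use here are purely illustrative.
 */
static int vfio_example_set_single_trigger(int device_fd, int index, int event_fd)
{
    struct vfio_irq_set *irq_set;
    int argsz = sizeof(*irq_set) + sizeof(int32_t);
    int ret;

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = index;                 /* e.g. VFIO_PCI_MSI_IRQ_INDEX */
    irq_set->start = 0;                     /* first vector to act on */
    irq_set->count = 1;                     /* number of vectors described */
    *(int32_t *)&irq_set->data = event_fd;  /* -1 would tear the trigger down */

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    return ret < 0 ? -errno : ret;
}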
46746dba AW |
414 | static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, |
415 | MSIMessage *msg, bool msix) | |
f4d45d47 AW |
416 | { |
417 | int virq; | |
418 | ||
46746dba | 419 | if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi) || !msg) { |
f4d45d47 AW |
420 | return; |
421 | } | |
422 | ||
423 | if (event_notifier_init(&vector->kvm_interrupt, 0)) { | |
424 | return; | |
425 | } | |
426 | ||
dc9f06ca | 427 | virq = kvm_irqchip_add_msi_route(kvm_state, *msg, &vdev->pdev); |
f4d45d47 AW |
428 | if (virq < 0) { |
429 | event_notifier_cleanup(&vector->kvm_interrupt); | |
430 | return; | |
431 | } | |
432 | ||
1c9b71a7 | 433 | if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, |
f4d45d47 AW |
434 | NULL, virq) < 0) { |
435 | kvm_irqchip_release_virq(kvm_state, virq); | |
436 | event_notifier_cleanup(&vector->kvm_interrupt); | |
437 | return; | |
438 | } | |
439 | ||
f4d45d47 AW |
440 | vector->virq = virq; |
441 | } | |
442 | ||
443 | static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector) | |
444 | { | |
1c9b71a7 EA |
445 | kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, |
446 | vector->virq); | |
f4d45d47 AW |
447 | kvm_irqchip_release_virq(kvm_state, vector->virq); |
448 | vector->virq = -1; | |
449 | event_notifier_cleanup(&vector->kvm_interrupt); | |
450 | } | |
451 | ||
dc9f06ca PF |
452 | static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg, |
453 | PCIDevice *pdev) | |
f4d45d47 | 454 | { |
dc9f06ca | 455 | kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev); |
f4d45d47 AW |
456 | } |
457 | ||
b0223e29 AW |
458 | static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, |
459 | MSIMessage *msg, IOHandler *handler) | |
65501a74 | 460 | { |
9ee27d73 | 461 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); |
65501a74 AW |
462 | VFIOMSIVector *vector; |
463 | int ret; | |
464 | ||
df92ee44 | 465 | trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr); |
65501a74 | 466 | |
65501a74 | 467 | vector = &vdev->msi_vectors[nr]; |
65501a74 | 468 | |
f4d45d47 AW |
469 | if (!vector->use) { |
470 | vector->vdev = vdev; | |
471 | vector->virq = -1; | |
472 | if (event_notifier_init(&vector->interrupt, 0)) { | |
473 | error_report("vfio: Error: event_notifier_init failed"); | |
474 | } | |
475 | vector->use = true; | |
476 | msix_vector_use(pdev, nr); | |
65501a74 AW |
477 | } |
478 | ||
f4d45d47 AW |
479 | qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), |
480 | handler, NULL, vector); | |
481 | ||
65501a74 AW |
482 | /* |
483 | * Attempt to enable route through KVM irqchip, | |
484 | * default to userspace handling if unavailable. | |
485 | */ | |
f4d45d47 AW |
486 | if (vector->virq >= 0) { |
487 | if (!msg) { | |
488 | vfio_remove_kvm_msi_virq(vector); | |
489 | } else { | |
dc9f06ca | 490 | vfio_update_kvm_msi_virq(vector, *msg, pdev); |
65501a74 | 491 | } |
f4d45d47 | 492 | } else { |
46746dba | 493 | vfio_add_kvm_msi_virq(vdev, vector, msg, true); |
65501a74 AW |
494 | } |
495 | ||
496 | /* | |
497 | * We don't want to have the host allocate all possible MSI vectors | |
498 | * for a device if they're not in use, so we shut them down and | |
499 | * incrementally increase the count as needed. | |
500 | */ | |
501 | if (vdev->nr_vectors < nr + 1) { | |
5546a621 | 502 | vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); |
65501a74 AW |
503 | vdev->nr_vectors = nr + 1; |
504 | ret = vfio_enable_vectors(vdev, true); | |
505 | if (ret) { | |
312fd5f2 | 506 | error_report("vfio: failed to enable vectors, %d", ret); |
65501a74 | 507 | } |
65501a74 | 508 | } else { |
1a403133 AW |
509 | int argsz; |
510 | struct vfio_irq_set *irq_set; | |
511 | int32_t *pfd; | |
512 | ||
513 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
514 | ||
515 | irq_set = g_malloc0(argsz); | |
516 | irq_set->argsz = argsz; | |
517 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
518 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
519 | irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX; | |
520 | irq_set->start = nr; | |
521 | irq_set->count = 1; | |
522 | pfd = (int32_t *)&irq_set->data; | |
523 | ||
f4d45d47 AW |
524 | if (vector->virq >= 0) { |
525 | *pfd = event_notifier_get_fd(&vector->kvm_interrupt); | |
526 | } else { | |
527 | *pfd = event_notifier_get_fd(&vector->interrupt); | |
528 | } | |
1a403133 | 529 | |
5546a621 | 530 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); |
1a403133 | 531 | g_free(irq_set); |
65501a74 | 532 | if (ret) { |
312fd5f2 | 533 | error_report("vfio: failed to modify vector, %d", ret); |
65501a74 | 534 | } |
65501a74 AW |
535 | } |
536 | ||
537 | return 0; | |
538 | } | |
539 | ||
b0223e29 AW |
540 | static int vfio_msix_vector_use(PCIDevice *pdev, |
541 | unsigned int nr, MSIMessage msg) | |
542 | { | |
543 | return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt); | |
544 | } | |
545 | ||
65501a74 AW |
546 | static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr) |
547 | { | |
9ee27d73 | 548 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); |
65501a74 | 549 | VFIOMSIVector *vector = &vdev->msi_vectors[nr]; |
65501a74 | 550 | |
df92ee44 | 551 | trace_vfio_msix_vector_release(vdev->vbasedev.name, nr); |
65501a74 AW |
552 | |
553 | /* | |
f4d45d47 AW |
554 | * There are still old guests that mask and unmask vectors on every |
555 | * interrupt. If we're using QEMU bypass with a KVM irqfd, leave all of | |
556 | * the KVM setup in place, simply switch VFIO to use the non-bypass | |
557 | * eventfd. We'll then fire the interrupt through QEMU and the MSI-X | |
558 | * core will mask the interrupt and set pending bits, allowing it to | |
559 | * be re-asserted on unmask. Nothing to do if already using QEMU mode. | |
65501a74 | 560 | */ |
f4d45d47 AW |
561 | if (vector->virq >= 0) { |
562 | int argsz; | |
563 | struct vfio_irq_set *irq_set; | |
564 | int32_t *pfd; | |
1a403133 | 565 | |
f4d45d47 | 566 | argsz = sizeof(*irq_set) + sizeof(*pfd); |
1a403133 | 567 | |
f4d45d47 AW |
568 | irq_set = g_malloc0(argsz); |
569 | irq_set->argsz = argsz; | |
570 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
571 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
572 | irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX; | |
573 | irq_set->start = nr; | |
574 | irq_set->count = 1; | |
575 | pfd = (int32_t *)&irq_set->data; | |
1a403133 | 576 | |
f4d45d47 | 577 | *pfd = event_notifier_get_fd(&vector->interrupt); |
1a403133 | 578 | |
5546a621 | 579 | ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); |
65501a74 | 580 | |
f4d45d47 | 581 | g_free(irq_set); |
65501a74 | 582 | } |
65501a74 AW |
583 | } |
584 | ||
0de70dc7 | 585 | static void vfio_msix_enable(VFIOPCIDevice *vdev) |
fd704adc AW |
586 | { |
587 | vfio_disable_interrupts(vdev); | |
588 | ||
589 | vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector)); | |
590 | ||
591 | vdev->interrupt = VFIO_INT_MSIX; | |
592 | ||
b0223e29 AW |
593 | /* |
594 | * Some communication channels between VF & PF or PF & fw rely on the | |
595 | * physical state of the device and expect that enabling MSI-X from the | |
596 | * guest enables the same on the host. When our guest is Linux, the | |
597 | * guest driver call to pci_enable_msix() sets the enabling bit in the | |
598 | * MSI-X capability, but leaves the vector table masked. We therefore | |
599 | * can't rely on a vector_use callback (from request_irq() in the guest) | |
600 | * to switch the physical device into MSI-X mode because that may come a | |
601 | * long time after pci_enable_msix(). This code enables vector 0 with | |
602 | * triggering to userspace, then immediately release the vector, leaving | |
603 | * the physical device with no vectors enabled, but MSI-X enabled, just | |
604 | * like the guest view. | |
605 | */ | |
606 | vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL); | |
607 | vfio_msix_vector_release(&vdev->pdev, 0); | |
608 | ||
fd704adc | 609 | if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use, |
bbef882c | 610 | vfio_msix_vector_release, NULL)) { |
312fd5f2 | 611 | error_report("vfio: msix_set_vector_notifiers failed"); |
fd704adc AW |
612 | } |
613 | ||
0de70dc7 | 614 | trace_vfio_msix_enable(vdev->vbasedev.name); |
fd704adc AW |
615 | } |
616 | ||
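/*
 * Summary note (added for clarity): enable MSI using the vector count the
 * guest has already allocated, give each vector its own eventfd, attempt to
 * route each one through the KVM irqchip, and program them all with a single
 * VFIO_DEVICE_SET_IRQS call.  If the host can only provide fewer vectors,
 * the positive return value is the achievable count and the setup is torn
 * down and retried with that number.
 */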
0de70dc7 | 617 | static void vfio_msi_enable(VFIOPCIDevice *vdev) |
65501a74 AW |
618 | { |
619 | int ret, i; | |
620 | ||
621 | vfio_disable_interrupts(vdev); | |
622 | ||
623 | vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev); | |
624 | retry: | |
625 | vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector)); | |
626 | ||
627 | for (i = 0; i < vdev->nr_vectors; i++) { | |
65501a74 | 628 | VFIOMSIVector *vector = &vdev->msi_vectors[i]; |
9b3af4c0 | 629 | MSIMessage msg = msi_get_message(&vdev->pdev, i); |
65501a74 AW |
630 | |
631 | vector->vdev = vdev; | |
f4d45d47 | 632 | vector->virq = -1; |
65501a74 AW |
633 | vector->use = true; |
634 | ||
635 | if (event_notifier_init(&vector->interrupt, 0)) { | |
312fd5f2 | 636 | error_report("vfio: Error: event_notifier_init failed"); |
65501a74 AW |
637 | } |
638 | ||
f4d45d47 AW |
639 | qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), |
640 | vfio_msi_interrupt, NULL, vector); | |
641 | ||
65501a74 AW |
642 | /* |
643 | * Attempt to enable route through KVM irqchip, | |
644 | * default to userspace handling if unavailable. | |
645 | */ | |
46746dba | 646 | vfio_add_kvm_msi_virq(vdev, vector, &msg, false); |
65501a74 AW |
647 | } |
648 | ||
f4d45d47 AW |
649 | /* Set interrupt type prior to possible interrupts */ |
650 | vdev->interrupt = VFIO_INT_MSI; | |
651 | ||
65501a74 AW |
652 | ret = vfio_enable_vectors(vdev, false); |
653 | if (ret) { | |
654 | if (ret < 0) { | |
312fd5f2 | 655 | error_report("vfio: Error: Failed to setup MSI fds: %m"); |
65501a74 AW |
656 | } else if (ret != vdev->nr_vectors) { |
657 | error_report("vfio: Error: Failed to enable %d " | |
312fd5f2 | 658 | "MSI vectors, retry with %d", vdev->nr_vectors, ret); |
65501a74 AW |
659 | } |
660 | ||
661 | for (i = 0; i < vdev->nr_vectors; i++) { | |
662 | VFIOMSIVector *vector = &vdev->msi_vectors[i]; | |
663 | if (vector->virq >= 0) { | |
f4d45d47 | 664 | vfio_remove_kvm_msi_virq(vector); |
65501a74 | 665 | } |
f4d45d47 AW |
666 | qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), |
667 | NULL, NULL, NULL); | |
65501a74 AW |
668 | event_notifier_cleanup(&vector->interrupt); |
669 | } | |
670 | ||
671 | g_free(vdev->msi_vectors); | |
672 | ||
673 | if (ret > 0 && ret != vdev->nr_vectors) { | |
674 | vdev->nr_vectors = ret; | |
675 | goto retry; | |
676 | } | |
677 | vdev->nr_vectors = 0; | |
678 | ||
f4d45d47 AW |
679 | /* |
680 | * Failing to setup MSI doesn't really fall within any specification. | |
681 | * Let's try leaving interrupts disabled and hope the guest figures | |
682 | * out how to fall back to INTx for this device. | |
683 | */ | |
684 | error_report("vfio: Error: Failed to enable MSI"); | |
685 | vdev->interrupt = VFIO_INT_NONE; | |
686 | ||
65501a74 AW |
687 | return; |
688 | } | |
689 | ||
0de70dc7 | 690 | trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors); |
65501a74 AW |
691 | } |
692 | ||
0de70dc7 | 693 | static void vfio_msi_disable_common(VFIOPCIDevice *vdev) |
fd704adc | 694 | { |
f4d45d47 AW |
695 | int i; |
696 | ||
697 | for (i = 0; i < vdev->nr_vectors; i++) { | |
698 | VFIOMSIVector *vector = &vdev->msi_vectors[i]; | |
699 | if (vdev->msi_vectors[i].use) { | |
700 | if (vector->virq >= 0) { | |
701 | vfio_remove_kvm_msi_virq(vector); | |
702 | } | |
703 | qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), | |
704 | NULL, NULL, NULL); | |
705 | event_notifier_cleanup(&vector->interrupt); | |
706 | } | |
707 | } | |
708 | ||
fd704adc AW |
709 | g_free(vdev->msi_vectors); |
710 | vdev->msi_vectors = NULL; | |
711 | vdev->nr_vectors = 0; | |
712 | vdev->interrupt = VFIO_INT_NONE; | |
713 | ||
870cb6f1 | 714 | vfio_intx_enable(vdev); |
fd704adc AW |
715 | } |
716 | ||
0de70dc7 | 717 | static void vfio_msix_disable(VFIOPCIDevice *vdev) |
fd704adc | 718 | { |
3e40ba0f AW |
719 | int i; |
720 | ||
fd704adc AW |
721 | msix_unset_vector_notifiers(&vdev->pdev); |
722 | ||
3e40ba0f AW |
723 | /* |
724 | * MSI-X will only release vectors if MSI-X is still enabled on the | |
725 | * device; check through the rest and release them ourselves if necessary. | |
726 | */ | |
727 | for (i = 0; i < vdev->nr_vectors; i++) { | |
728 | if (vdev->msi_vectors[i].use) { | |
729 | vfio_msix_vector_release(&vdev->pdev, i); | |
f4d45d47 | 730 | msix_vector_unuse(&vdev->pdev, i); |
3e40ba0f AW |
731 | } |
732 | } | |
733 | ||
fd704adc | 734 | if (vdev->nr_vectors) { |
5546a621 | 735 | vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); |
fd704adc AW |
736 | } |
737 | ||
0de70dc7 | 738 | vfio_msi_disable_common(vdev); |
fd704adc | 739 | |
0de70dc7 | 740 | trace_vfio_msix_disable(vdev->vbasedev.name); |
fd704adc AW |
741 | } |
742 | ||
0de70dc7 | 743 | static void vfio_msi_disable(VFIOPCIDevice *vdev) |
65501a74 | 744 | { |
5546a621 | 745 | vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX); |
0de70dc7 | 746 | vfio_msi_disable_common(vdev); |
65501a74 | 747 | |
0de70dc7 | 748 | trace_vfio_msi_disable(vdev->vbasedev.name); |
65501a74 AW |
749 | } |
750 | ||
9ee27d73 | 751 | static void vfio_update_msi(VFIOPCIDevice *vdev) |
c7679d45 AW |
752 | { |
753 | int i; | |
754 | ||
755 | for (i = 0; i < vdev->nr_vectors; i++) { | |
756 | VFIOMSIVector *vector = &vdev->msi_vectors[i]; | |
757 | MSIMessage msg; | |
758 | ||
759 | if (!vector->use || vector->virq < 0) { | |
760 | continue; | |
761 | } | |
762 | ||
763 | msg = msi_get_message(&vdev->pdev, i); | |
dc9f06ca | 764 | vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev); |
c7679d45 AW |
765 | } |
766 | } | |
767 | ||
9ee27d73 | 768 | static void vfio_pci_load_rom(VFIOPCIDevice *vdev) |
6f864e6e AW |
769 | { |
770 | struct vfio_region_info reg_info = { | |
771 | .argsz = sizeof(reg_info), | |
772 | .index = VFIO_PCI_ROM_REGION_INDEX | |
773 | }; | |
774 | uint64_t size; | |
775 | off_t off = 0; | |
7d489dcd | 776 | ssize_t bytes; |
6f864e6e | 777 | |
5546a621 | 778 | if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, ®_info)) { |
6f864e6e AW |
779 | error_report("vfio: Error getting ROM info: %m"); |
780 | return; | |
781 | } | |
782 | ||
df92ee44 | 783 | trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info.size, |
385f57cf EA |
784 | (unsigned long)reg_info.offset, |
785 | (unsigned long)reg_info.flags); | |
6f864e6e AW |
786 | |
787 | vdev->rom_size = size = reg_info.size; | |
788 | vdev->rom_offset = reg_info.offset; | |
789 | ||
790 | if (!vdev->rom_size) { | |
e638073c | 791 | vdev->rom_read_failed = true; |
d20b43df | 792 | error_report("vfio-pci: Cannot read device rom at " |
df92ee44 | 793 | "%s", vdev->vbasedev.name); |
d20b43df BD |
794 | error_printf("Device option ROM contents are probably invalid " |
795 | "(check dmesg).\nSkip option ROM probe with rombar=0, " | |
796 | "or load from file with romfile=\n"); | |
6f864e6e AW |
797 | return; |
798 | } | |
799 | ||
800 | vdev->rom = g_malloc(size); | |
801 | memset(vdev->rom, 0xff, size); | |
802 | ||
803 | while (size) { | |
5546a621 EA |
804 | bytes = pread(vdev->vbasedev.fd, vdev->rom + off, |
805 | size, vdev->rom_offset + off); | |
6f864e6e AW |
806 | if (bytes == 0) { |
807 | break; | |
808 | } else if (bytes > 0) { | |
809 | off += bytes; | |
810 | size -= bytes; | |
811 | } else { | |
812 | if (errno == EINTR || errno == EAGAIN) { | |
813 | continue; | |
814 | } | |
815 | error_report("vfio: Error reading device ROM: %m"); | |
816 | break; | |
817 | } | |
818 | } | |
819 | } | |
820 | ||
821 | static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size) | |
822 | { | |
9ee27d73 | 823 | VFIOPCIDevice *vdev = opaque; |
75bd0c72 ND |
824 | union { |
825 | uint8_t byte; | |
826 | uint16_t word; | |
827 | uint32_t dword; | |
828 | uint64_t qword; | |
829 | } val; | |
830 | uint64_t data = 0; | |
6f864e6e AW |
831 | |
832 | /* Load the ROM lazily when the guest tries to read it */ | |
db01eedb | 833 | if (unlikely(!vdev->rom && !vdev->rom_read_failed)) { |
6f864e6e AW |
834 | vfio_pci_load_rom(vdev); |
835 | } | |
836 | ||
6758008e | 837 | memcpy(&val, vdev->rom + addr, |
6f864e6e AW |
838 | (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0); |
839 | ||
75bd0c72 ND |
840 | switch (size) { |
841 | case 1: | |
842 | data = val.byte; | |
843 | break; | |
844 | case 2: | |
845 | data = le16_to_cpu(val.word); | |
846 | break; | |
847 | case 4: | |
848 | data = le32_to_cpu(val.dword); | |
849 | break; | |
850 | default: | |
851 | hw_error("vfio: unsupported read size, %d bytes\n", size); | |
852 | break; | |
853 | } | |
854 | ||
df92ee44 | 855 | trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data); |
6f864e6e | 856 | |
75bd0c72 | 857 | return data; |
6f864e6e AW |
858 | } |
859 | ||
64fa25a0 AW |
860 | static void vfio_rom_write(void *opaque, hwaddr addr, |
861 | uint64_t data, unsigned size) | |
862 | { | |
863 | } | |
864 | ||
6f864e6e AW |
865 | static const MemoryRegionOps vfio_rom_ops = { |
866 | .read = vfio_rom_read, | |
64fa25a0 | 867 | .write = vfio_rom_write, |
6758008e | 868 | .endianness = DEVICE_LITTLE_ENDIAN, |
6f864e6e AW |
869 | }; |
870 | ||
9ee27d73 | 871 | static void vfio_pci_size_rom(VFIOPCIDevice *vdev) |
6f864e6e | 872 | { |
b1c50c5f | 873 | uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK); |
6f864e6e | 874 | off_t offset = vdev->config_offset + PCI_ROM_ADDRESS; |
4b943029 | 875 | DeviceState *dev = DEVICE(vdev); |
6f864e6e | 876 | char name[32]; |
5546a621 | 877 | int fd = vdev->vbasedev.fd; |
6f864e6e AW |
878 | |
879 | if (vdev->pdev.romfile || !vdev->pdev.rom_bar) { | |
4b943029 BD |
880 | /* Since pci handles romfile, just print a message and return */ |
881 | if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) { | |
882 | error_printf("Warning : Device at %04x:%02x:%02x.%x " | |
883 | "is known to cause system instability issues during " | |
884 | "option rom execution. " | |
885 | "Proceeding anyway since user specified romfile\n", | |
886 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
887 | vdev->host.function); | |
888 | } | |
6f864e6e AW |
889 | return; |
890 | } | |
891 | ||
892 | /* | |
893 | * Use the same size ROM BAR as the physical device. The contents | |
894 | * will get filled in later when the guest tries to read it. | |
895 | */ | |
5546a621 EA |
896 | if (pread(fd, &orig, 4, offset) != 4 || |
897 | pwrite(fd, &size, 4, offset) != 4 || | |
898 | pread(fd, &size, 4, offset) != 4 || | |
899 | pwrite(fd, &orig, 4, offset) != 4) { | |
6f864e6e AW |
900 | error_report("%s(%04x:%02x:%02x.%x) failed: %m", |
901 | __func__, vdev->host.domain, vdev->host.bus, | |
902 | vdev->host.slot, vdev->host.function); | |
903 | return; | |
904 | } | |
905 | ||
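    /*
     * Clarifying note (added): this is the standard PCI BAR sizing probe -
     * write all 1s to the ROM BAR, read back the value, restore the original,
     * and the size is the two's complement of the masked read-back.  For
     * example, a read-back of 0xffff8000 after applying PCI_ROM_ADDRESS_MASK
     * gives ~0xffff8000 + 1 = 0x8000, i.e. a 32KB ROM BAR.
     */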
b1c50c5f | 906 | size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1; |
6f864e6e AW |
907 | |
908 | if (!size) { | |
909 | return; | |
910 | } | |
911 | ||
4b943029 BD |
912 | if (vfio_blacklist_opt_rom(vdev)) { |
913 | if (dev->opts && qemu_opt_get(dev->opts, "rombar")) { | |
914 | error_printf("Warning : Device at %04x:%02x:%02x.%x " | |
915 | "is known to cause system instability issues during " | |
916 | "option rom execution. " | |
917 | "Proceeding anyway since user specified non zero value for " | |
918 | "rombar\n", | |
919 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
920 | vdev->host.function); | |
921 | } else { | |
922 | error_printf("Warning : Rom loading for device at " | |
923 | "%04x:%02x:%02x.%x has been disabled due to " | |
924 | "system instability issues. " | |
925 | "Specify rombar=1 or romfile to force\n", | |
926 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
927 | vdev->host.function); | |
928 | return; | |
929 | } | |
930 | } | |
931 | ||
df92ee44 | 932 | trace_vfio_pci_size_rom(vdev->vbasedev.name, size); |
6f864e6e AW |
933 | |
934 | snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom", | |
935 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
936 | vdev->host.function); | |
937 | ||
938 | memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev), | |
939 | &vfio_rom_ops, vdev, name, size); | |
940 | ||
941 | pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, | |
942 | PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom); | |
943 | ||
944 | vdev->pdev.has_rom = true; | |
e638073c | 945 | vdev->rom_read_failed = false; |
6f864e6e AW |
946 | } |
947 | ||
c00d61d8 | 948 | void vfio_vga_write(void *opaque, hwaddr addr, |
f15689c7 AW |
949 | uint64_t data, unsigned size) |
950 | { | |
951 | VFIOVGARegion *region = opaque; | |
952 | VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]); | |
953 | union { | |
954 | uint8_t byte; | |
955 | uint16_t word; | |
956 | uint32_t dword; | |
957 | uint64_t qword; | |
958 | } buf; | |
959 | off_t offset = vga->fd_offset + region->offset + addr; | |
960 | ||
961 | switch (size) { | |
962 | case 1: | |
963 | buf.byte = data; | |
964 | break; | |
965 | case 2: | |
966 | buf.word = cpu_to_le16(data); | |
967 | break; | |
968 | case 4: | |
969 | buf.dword = cpu_to_le32(data); | |
970 | break; | |
971 | default: | |
4e505ddd | 972 | hw_error("vfio: unsupported write size, %d bytes", size); |
f15689c7 AW |
973 | break; |
974 | } | |
975 | ||
976 | if (pwrite(vga->fd, &buf, size, offset) != size) { | |
977 | error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m", | |
978 | __func__, region->offset + addr, data, size); | |
979 | } | |
980 | ||
385f57cf | 981 | trace_vfio_vga_write(region->offset + addr, data, size); |
f15689c7 AW |
982 | } |
983 | ||
c00d61d8 | 984 | uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size) |
f15689c7 AW |
985 | { |
986 | VFIOVGARegion *region = opaque; | |
987 | VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]); | |
988 | union { | |
989 | uint8_t byte; | |
990 | uint16_t word; | |
991 | uint32_t dword; | |
992 | uint64_t qword; | |
993 | } buf; | |
994 | uint64_t data = 0; | |
995 | off_t offset = vga->fd_offset + region->offset + addr; | |
996 | ||
997 | if (pread(vga->fd, &buf, size, offset) != size) { | |
998 | error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m", | |
999 | __func__, region->offset + addr, size); | |
1000 | return (uint64_t)-1; | |
1001 | } | |
1002 | ||
1003 | switch (size) { | |
1004 | case 1: | |
1005 | data = buf.byte; | |
1006 | break; | |
1007 | case 2: | |
1008 | data = le16_to_cpu(buf.word); | |
1009 | break; | |
1010 | case 4: | |
1011 | data = le32_to_cpu(buf.dword); | |
1012 | break; | |
1013 | default: | |
4e505ddd | 1014 | hw_error("vfio: unsupported read size, %d bytes", size); |
f15689c7 AW |
1015 | break; |
1016 | } | |
1017 | ||
385f57cf | 1018 | trace_vfio_vga_read(region->offset + addr, size, data); |
f15689c7 AW |
1019 | |
1020 | return data; | |
1021 | } | |
1022 | ||
1023 | static const MemoryRegionOps vfio_vga_ops = { | |
1024 | .read = vfio_vga_read, | |
1025 | .write = vfio_vga_write, | |
1026 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1027 | }; | |
1028 | ||
65501a74 AW |
1029 | /* |
1030 | * PCI config space | |
1031 | */ | |
c00d61d8 | 1032 | uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) |
65501a74 | 1033 | { |
9ee27d73 | 1034 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); |
4b5d5e87 | 1035 | uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val; |
65501a74 | 1036 | |
4b5d5e87 AW |
1037 | memcpy(&emu_bits, vdev->emulated_config_bits + addr, len); |
1038 | emu_bits = le32_to_cpu(emu_bits); | |
65501a74 | 1039 | |
4b5d5e87 AW |
1040 | if (emu_bits) { |
1041 | emu_val = pci_default_read_config(pdev, addr, len); | |
1042 | } | |
1043 | ||
1044 | if (~emu_bits & (0xffffffffU >> (32 - len * 8))) { | |
1045 | ssize_t ret; | |
1046 | ||
5546a621 EA |
1047 | ret = pread(vdev->vbasedev.fd, &phys_val, len, |
1048 | vdev->config_offset + addr); | |
4b5d5e87 | 1049 | if (ret != len) { |
312fd5f2 | 1050 | error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m", |
65501a74 AW |
1051 | __func__, vdev->host.domain, vdev->host.bus, |
1052 | vdev->host.slot, vdev->host.function, addr, len); | |
1053 | return -errno; | |
1054 | } | |
4b5d5e87 | 1055 | phys_val = le32_to_cpu(phys_val); |
65501a74 AW |
1056 | } |
1057 | ||
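    /*
     * Added note: emu_bits selects, bit for bit, which parts of this access
     * come from QEMU's emulated config space rather than the physical device.
     * For example, with emu_bits == 0x0000ffff on a 4-byte read, the low 16
     * bits come from the emulated value and the high 16 bits from the device.
     */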
4b5d5e87 | 1058 | val = (emu_val & emu_bits) | (phys_val & ~emu_bits); |
65501a74 | 1059 | |
df92ee44 | 1060 | trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val); |
65501a74 AW |
1061 | |
1062 | return val; | |
1063 | } | |
1064 | ||
c00d61d8 AW |
1065 | void vfio_pci_write_config(PCIDevice *pdev, |
1066 | uint32_t addr, uint32_t val, int len) | |
65501a74 | 1067 | { |
9ee27d73 | 1068 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); |
65501a74 AW |
1069 | uint32_t val_le = cpu_to_le32(val); |
1070 | ||
df92ee44 | 1071 | trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len); |
65501a74 AW |
1072 | |
1073 | /* Write everything to VFIO, let it filter out what we can't write */ | |
5546a621 EA |
1074 | if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr) |
1075 | != len) { | |
312fd5f2 | 1076 | error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m", |
65501a74 AW |
1077 | __func__, vdev->host.domain, vdev->host.bus, |
1078 | vdev->host.slot, vdev->host.function, addr, val, len); | |
1079 | } | |
1080 | ||
65501a74 AW |
1081 | /* MSI/MSI-X Enabling/Disabling */ |
1082 | if (pdev->cap_present & QEMU_PCI_CAP_MSI && | |
1083 | ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) { | |
1084 | int is_enabled, was_enabled = msi_enabled(pdev); | |
1085 | ||
1086 | pci_default_write_config(pdev, addr, val, len); | |
1087 | ||
1088 | is_enabled = msi_enabled(pdev); | |
1089 | ||
c7679d45 AW |
1090 | if (!was_enabled) { |
1091 | if (is_enabled) { | |
0de70dc7 | 1092 | vfio_msi_enable(vdev); |
c7679d45 AW |
1093 | } |
1094 | } else { | |
1095 | if (!is_enabled) { | |
0de70dc7 | 1096 | vfio_msi_disable(vdev); |
c7679d45 AW |
1097 | } else { |
1098 | vfio_update_msi(vdev); | |
1099 | } | |
65501a74 | 1100 | } |
4b5d5e87 | 1101 | } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX && |
65501a74 AW |
1102 | ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) { |
1103 | int is_enabled, was_enabled = msix_enabled(pdev); | |
1104 | ||
1105 | pci_default_write_config(pdev, addr, val, len); | |
1106 | ||
1107 | is_enabled = msix_enabled(pdev); | |
1108 | ||
1109 | if (!was_enabled && is_enabled) { | |
0de70dc7 | 1110 | vfio_msix_enable(vdev); |
65501a74 | 1111 | } else if (was_enabled && !is_enabled) { |
0de70dc7 | 1112 | vfio_msix_disable(vdev); |
65501a74 | 1113 | } |
4b5d5e87 AW |
1114 | } else { |
1115 | /* Write everything to QEMU to keep emulated bits correct */ | |
1116 | pci_default_write_config(pdev, addr, val, len); | |
65501a74 AW |
1117 | } |
1118 | } | |
1119 | ||
65501a74 AW |
1120 | /* |
1121 | * Interrupt setup | |
1122 | */ | |
9ee27d73 | 1123 | static void vfio_disable_interrupts(VFIOPCIDevice *vdev) |
65501a74 | 1124 | { |
b3e27c3a AW |
1125 | /* |
1126 | * More complicated than it looks. Disabling MSI/X transitions the | |
1127 | * device to INTx mode (if supported). Therefore we need to first | |
1128 | * disable MSI/X and then clean up by disabling INTx. | |
1129 | */ | |
1130 | if (vdev->interrupt == VFIO_INT_MSIX) { | |
0de70dc7 | 1131 | vfio_msix_disable(vdev); |
b3e27c3a | 1132 | } else if (vdev->interrupt == VFIO_INT_MSI) { |
0de70dc7 | 1133 | vfio_msi_disable(vdev); |
b3e27c3a AW |
1134 | } |
1135 | ||
1136 | if (vdev->interrupt == VFIO_INT_INTx) { | |
870cb6f1 | 1137 | vfio_intx_disable(vdev); |
65501a74 AW |
1138 | } |
1139 | } | |
1140 | ||
0de70dc7 | 1141 | static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos) |
65501a74 AW |
1142 | { |
1143 | uint16_t ctrl; | |
1144 | bool msi_64bit, msi_maskbit; | |
1145 | int ret, entries; | |
1146 | ||
5546a621 | 1147 | if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl), |
65501a74 AW |
1148 | vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { |
1149 | return -errno; | |
1150 | } | |
1151 | ctrl = le16_to_cpu(ctrl); | |
1152 | ||
1153 | msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT); | |
1154 | msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT); | |
1155 | entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1); | |
1156 | ||
0de70dc7 | 1157 | trace_vfio_msi_setup(vdev->vbasedev.name, pos); |
65501a74 AW |
1158 | |
1159 | ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit); | |
1160 | if (ret < 0) { | |
e43b9a5a AW |
1161 | if (ret == -ENOTSUP) { |
1162 | return 0; | |
1163 | } | |
312fd5f2 | 1164 | error_report("vfio: msi_init failed"); |
65501a74 AW |
1165 | return ret; |
1166 | } | |
1167 | vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0); | |
1168 | ||
1169 | return 0; | |
1170 | } | |
1171 | ||
1172 | /* | |
1173 | * We don't have any control over how pci_add_capability() inserts | |
1174 | * capabilities into the chain. In order to set up MSI-X we need a | |
1175 | * MemoryRegion for the BAR. In order to set up the BAR and not | |
1176 | * attempt to mmap the MSI-X table area, which VFIO won't allow, we | |
1177 | * need to first look for where the MSI-X table lives. So we | |
1178 | * unfortunately split MSI-X setup across two functions. | |
1179 | */ | |
0de70dc7 | 1180 | static int vfio_msix_early_setup(VFIOPCIDevice *vdev) |
65501a74 AW |
1181 | { |
1182 | uint8_t pos; | |
1183 | uint16_t ctrl; | |
1184 | uint32_t table, pba; | |
5546a621 | 1185 | int fd = vdev->vbasedev.fd; |
b5bd049f | 1186 | VFIOMSIXInfo *msix; |
65501a74 AW |
1187 | |
1188 | pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX); | |
1189 | if (!pos) { | |
1190 | return 0; | |
1191 | } | |
1192 | ||
5546a621 | 1193 | if (pread(fd, &ctrl, sizeof(ctrl), |
65501a74 AW |
1194 | vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) { |
1195 | return -errno; | |
1196 | } | |
1197 | ||
5546a621 | 1198 | if (pread(fd, &table, sizeof(table), |
65501a74 AW |
1199 | vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) { |
1200 | return -errno; | |
1201 | } | |
1202 | ||
5546a621 | 1203 | if (pread(fd, &pba, sizeof(pba), |
65501a74 AW |
1204 | vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) { |
1205 | return -errno; | |
1206 | } | |
1207 | ||
1208 | ctrl = le16_to_cpu(ctrl); | |
1209 | table = le32_to_cpu(table); | |
1210 | pba = le32_to_cpu(pba); | |
1211 | ||
b5bd049f AW |
1212 | msix = g_malloc0(sizeof(*msix)); |
1213 | msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK; | |
1214 | msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK; | |
1215 | msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK; | |
1216 | msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK; | |
1217 | msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; | |
65501a74 | 1218 | |
43302969 GL |
1219 | /* |
1220 | * Check whether the reported pba_offset extends outside of the specified | |
1221 | * BAR. If it does, we either apply a hardware-specific quirk for a known | |
1222 | * device or reject the broken configuration. | |
1223 | */ | |
b5bd049f | 1224 | if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) { |
43302969 GL |
1225 | /* |
1226 | * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5 | |
1227 | * adapters. The T5 hardware returns an incorrect value of 0x8000 for | |
1228 | * the VF PBA offset while the BAR itself is only 8k. The correct value | |
1229 | * is 0x1000, so we hard code that here. | |
1230 | */ | |
ff635e37 AW |
1231 | if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO && |
1232 | (vdev->device_id & 0xff00) == 0x5800) { | |
b5bd049f | 1233 | msix->pba_offset = 0x1000; |
43302969 GL |
1234 | } else { |
1235 | error_report("vfio: Hardware reports invalid configuration, " | |
1236 | "MSIX PBA outside of specified BAR"); | |
b5bd049f | 1237 | g_free(msix); |
43302969 GL |
1238 | return -EINVAL; |
1239 | } | |
1240 | } | |
1241 | ||
0de70dc7 | 1242 | trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar, |
b5bd049f AW |
1243 | msix->table_offset, msix->entries); |
1244 | vdev->msix = msix; | |
65501a74 AW |
1245 | |
1246 | return 0; | |
1247 | } | |
1248 | ||
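/*
 * Second half of MSI-X setup (note added for clarity): by the time this runs
 * the BAR MemoryRegions exist, so the table and PBA located earlier by
 * vfio_msix_early_setup() can be registered with the MSI-X core via
 * msix_init().
 */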
0de70dc7 | 1249 | static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos) |
65501a74 AW |
1250 | { |
1251 | int ret; | |
1252 | ||
65501a74 | 1253 | ret = msix_init(&vdev->pdev, vdev->msix->entries, |
a664477d | 1254 | &vdev->bars[vdev->msix->table_bar].region.mem, |
65501a74 | 1255 | vdev->msix->table_bar, vdev->msix->table_offset, |
a664477d | 1256 | &vdev->bars[vdev->msix->pba_bar].region.mem, |
65501a74 AW |
1257 | vdev->msix->pba_bar, vdev->msix->pba_offset, pos); |
1258 | if (ret < 0) { | |
e43b9a5a AW |
1259 | if (ret == -ENOTSUP) { |
1260 | return 0; | |
1261 | } | |
312fd5f2 | 1262 | error_report("vfio: msix_init failed"); |
65501a74 AW |
1263 | return ret; |
1264 | } | |
1265 | ||
65501a74 AW |
1266 | return 0; |
1267 | } | |
1268 | ||
9ee27d73 | 1269 | static void vfio_teardown_msi(VFIOPCIDevice *vdev) |
65501a74 AW |
1270 | { |
1271 | msi_uninit(&vdev->pdev); | |
1272 | ||
1273 | if (vdev->msix) { | |
a664477d EA |
1274 | msix_uninit(&vdev->pdev, |
1275 | &vdev->bars[vdev->msix->table_bar].region.mem, | |
1276 | &vdev->bars[vdev->msix->pba_bar].region.mem); | |
65501a74 AW |
1277 | } |
1278 | } | |
1279 | ||
1280 | /* | |
1281 | * Resource setup | |
1282 | */ | |
9ee27d73 | 1283 | static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled) |
65501a74 AW |
1284 | { |
1285 | int i; | |
1286 | ||
1287 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
1288 | VFIOBAR *bar = &vdev->bars[i]; | |
1289 | ||
a664477d | 1290 | if (!bar->region.size) { |
65501a74 AW |
1291 | continue; |
1292 | } | |
1293 | ||
a664477d | 1294 | memory_region_set_enabled(&bar->region.mmap_mem, enabled); |
65501a74 AW |
1295 | if (vdev->msix && vdev->msix->table_bar == i) { |
1296 | memory_region_set_enabled(&vdev->msix->mmap_mem, enabled); | |
1297 | } | |
1298 | } | |
1299 | } | |
1300 | ||
ba5e6bfa | 1301 | static void vfio_unregister_bar(VFIOPCIDevice *vdev, int nr) |
65501a74 AW |
1302 | { |
1303 | VFIOBAR *bar = &vdev->bars[nr]; | |
1304 | ||
a664477d | 1305 | if (!bar->region.size) { |
65501a74 AW |
1306 | return; |
1307 | } | |
1308 | ||
7076eabc AW |
1309 | vfio_bar_quirk_teardown(vdev, nr); |
1310 | ||
a664477d | 1311 | memory_region_del_subregion(&bar->region.mem, &bar->region.mmap_mem); |
65501a74 AW |
1312 | |
1313 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
a664477d | 1314 | memory_region_del_subregion(&bar->region.mem, &vdev->msix->mmap_mem); |
ba5e6bfa PB |
1315 | } |
1316 | } | |
1317 | ||
1318 | static void vfio_unmap_bar(VFIOPCIDevice *vdev, int nr) | |
1319 | { | |
1320 | VFIOBAR *bar = &vdev->bars[nr]; | |
1321 | ||
1322 | if (!bar->region.size) { | |
1323 | return; | |
1324 | } | |
1325 | ||
1326 | vfio_bar_quirk_free(vdev, nr); | |
1327 | ||
1328 | munmap(bar->region.mmap, memory_region_size(&bar->region.mmap_mem)); | |
1329 | ||
1330 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
65501a74 AW |
1331 | munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem)); |
1332 | } | |
65501a74 AW |
1333 | } |
1334 | ||
9ee27d73 | 1335 | static void vfio_map_bar(VFIOPCIDevice *vdev, int nr) |
65501a74 AW |
1336 | { |
1337 | VFIOBAR *bar = &vdev->bars[nr]; | |
29c6e6df | 1338 | uint64_t size = bar->region.size; |
65501a74 AW |
1339 | char name[64]; |
1340 | uint32_t pci_bar; | |
1341 | uint8_t type; | |
1342 | int ret; | |
1343 | ||
1344 | /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */ |
1345 | if (!size) { | |
1346 | return; | |
1347 | } | |
1348 | ||
1349 | snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d", | |
1350 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
1351 | vdev->host.function, nr); | |
1352 | ||
1353 | /* Determine what type of BAR this is for registration */ | |
5546a621 | 1354 | ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar), |
65501a74 AW |
1355 | vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr)); |
1356 | if (ret != sizeof(pci_bar)) { | |
312fd5f2 | 1357 | error_report("vfio: Failed to read BAR %d (%m)", nr); |
65501a74 AW |
1358 | return; |
1359 | } | |
1360 | ||
1361 | pci_bar = le32_to_cpu(pci_bar); | |
39360f0b AW |
1362 | bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO); |
1363 | bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64); | |
1364 | type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK : | |
1365 | ~PCI_BASE_ADDRESS_MEM_MASK); | |
65501a74 AW |
1366 | |
1367 | /* A "slow" read/write mapping underlies all BARs */ | |
a664477d | 1368 | memory_region_init_io(&bar->region.mem, OBJECT(vdev), &vfio_region_ops, |
39360f0b | 1369 | bar, name, size); |
a664477d | 1370 | pci_register_bar(&vdev->pdev, nr, type, &bar->region.mem); |
65501a74 AW |
1371 | |
1372 | /* | |
1373 | * We can't mmap areas overlapping the MSIX vector table, so we | |
1374 | * potentially insert a direct-mapped subregion before and after it. | |
1375 | */ | |
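    /*
     * Added worked example: with 4KB host pages and an MSI-X table at BAR
     * offset 0x800 holding 16 entries (16 * PCI_MSIX_ENTRY_SIZE = 0x100
     * bytes), the low mmap below gets size 0x800 & page_mask = 0 and the
     * "msix-hi" mmap further down starts at REAL_HOST_PAGE_ALIGN(0x900) =
     * 0x1000, so only the page containing the table remains trapped.
     */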
1376 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
f7ceed19 | 1377 | size = vdev->msix->table_offset & qemu_real_host_page_mask; |
65501a74 AW |
1378 | } |
1379 | ||
1380 | strncat(name, " mmap", sizeof(name) - strlen(name) - 1); | |
a664477d EA |
1381 | if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem, |
1382 | &bar->region.mmap_mem, &bar->region.mmap, | |
1383 | size, 0, name)) { | |
312fd5f2 | 1384 | error_report("%s unsupported. Performance may be slow", name); |
65501a74 AW |
1385 | } |
1386 | ||
1387 | if (vdev->msix && vdev->msix->table_bar == nr) { | |
29c6e6df | 1388 | uint64_t start; |
65501a74 | 1389 | |
f7ceed19 PC |
1390 | start = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset + |
1391 | (vdev->msix->entries * | |
1392 | PCI_MSIX_ENTRY_SIZE)); | |
65501a74 | 1393 | |
a664477d | 1394 | size = start < bar->region.size ? bar->region.size - start : 0; |
65501a74 AW |
1395 | strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1); |
1396 | /* VFIOMSIXInfo contains another MemoryRegion for this mapping */ | |
a664477d EA |
1397 | if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem, |
1398 | &vdev->msix->mmap_mem, | |
65501a74 | 1399 | &vdev->msix->mmap, size, start, name)) { |
312fd5f2 | 1400 | error_report("%s unsupported. Performance may be slow", name); |
65501a74 AW |
1401 | } |
1402 | } | |
7076eabc AW |
1403 | |
1404 | vfio_bar_quirk_setup(vdev, nr); | |
65501a74 AW |
1405 | } |
1406 | ||
9ee27d73 | 1407 | static void vfio_map_bars(VFIOPCIDevice *vdev) |
65501a74 AW |
1408 | { |
1409 | int i; | |
1410 | ||
1411 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
1412 | vfio_map_bar(vdev, i); | |
1413 | } | |
f15689c7 AW |
1414 | |
1415 | if (vdev->has_vga) { | |
3c161542 PB |
1416 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem, |
1417 | OBJECT(vdev), &vfio_vga_ops, | |
f15689c7 AW |
1418 | &vdev->vga.region[QEMU_PCI_VGA_MEM], |
1419 | "vfio-vga-mmio@0xa0000", | |
1420 | QEMU_PCI_VGA_MEM_SIZE); | |
3c161542 PB |
1421 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem, |
1422 | OBJECT(vdev), &vfio_vga_ops, | |
f15689c7 AW |
1423 | &vdev->vga.region[QEMU_PCI_VGA_IO_LO], |
1424 | "vfio-vga-io@0x3b0", | |
1425 | QEMU_PCI_VGA_IO_LO_SIZE); | |
3c161542 PB |
1426 | memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem, |
1427 | OBJECT(vdev), &vfio_vga_ops, | |
f15689c7 AW |
1428 | &vdev->vga.region[QEMU_PCI_VGA_IO_HI], |
1429 | "vfio-vga-io@0x3c0", | |
1430 | QEMU_PCI_VGA_IO_HI_SIZE); | |
1431 | ||
1432 | pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem, | |
1433 | &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem, | |
1434 | &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem); | |
7076eabc | 1435 | vfio_vga_quirk_setup(vdev); |
f15689c7 | 1436 | } |
65501a74 AW |
1437 | } |
1438 | ||
ba5e6bfa | 1439 | static void vfio_unregister_bars(VFIOPCIDevice *vdev) |
65501a74 AW |
1440 | { |
1441 | int i; | |
1442 | ||
1443 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
ba5e6bfa | 1444 | vfio_unregister_bar(vdev, i); |
65501a74 | 1445 | } |
f15689c7 AW |
1446 | |
1447 | if (vdev->has_vga) { | |
7076eabc | 1448 | vfio_vga_quirk_teardown(vdev); |
f15689c7 | 1449 | pci_unregister_vga(&vdev->pdev); |
f15689c7 | 1450 | } |
65501a74 AW |
1451 | } |
1452 | ||
ba5e6bfa PB |
1453 | static void vfio_unmap_bars(VFIOPCIDevice *vdev) |
1454 | { | |
1455 | int i; | |
1456 | ||
1457 | for (i = 0; i < PCI_ROM_SLOT; i++) { | |
1458 | vfio_unmap_bar(vdev, i); | |
1459 | } | |
1460 | ||
1461 | if (vdev->has_vga) { | |
1462 | vfio_vga_quirk_free(vdev); | |
1463 | } | |
1464 | } | |
1465 | ||
65501a74 AW |
1466 | /* |
1467 | * General setup | |
1468 | */ | |
1469 | static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos) | |
1470 | { | |
1471 | uint8_t tmp, next = 0xff; | |
1472 | ||
1473 | for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp; | |
1474 | tmp = pdev->config[tmp + 1]) { | |
1475 | if (tmp > pos && tmp < next) { | |
1476 | next = tmp; | |
1477 | } | |
1478 | } | |
1479 | ||
1480 | return next - pos; | |
1481 | } | |
1482 | ||
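A minimal standalone sketch of the sizing rule implemented by vfio_std_cap_max_size(), run against a made-up capability chain 0x40 -> 0x50 -> 0x60 (editor's illustration, not part of this file):

#include <stdint.h>
#include <stdio.h>

#define PCI_CAPABILITY_LIST 0x34

static uint8_t cap_max_size(const uint8_t *config, uint8_t pos)
{
    uint8_t tmp, next = 0xff;

    for (tmp = config[PCI_CAPABILITY_LIST]; tmp; tmp = config[tmp + 1]) {
        if (tmp > pos && tmp < next) {
            next = tmp;                 /* lowest capability offset above pos */
        }
    }

    return next - pos;
}

int main(void)
{
    uint8_t config[256] = { 0 };

    config[PCI_CAPABILITY_LIST] = 0x40;
    config[0x41] = 0x50;                /* next pointer of the cap at 0x40 */
    config[0x51] = 0x60;                /* next pointer of the cap at 0x50 */
    config[0x61] = 0x00;                /* end of chain */

    /* Prints 0x10, 0x10 and 0x9f: each cap is sized up to the next one,
     * and the last cap gets everything up to the 0xff boundary. */
    printf("size@0x40=0x%02x size@0x50=0x%02x size@0x60=0x%02x\n",
           cap_max_size(config, 0x40), cap_max_size(config, 0x50),
           cap_max_size(config, 0x60));
    return 0;
}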
96adc5c7 AW |
1483 | static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask) |
1484 | { | |
1485 | pci_set_word(buf, (pci_get_word(buf) & ~mask) | val); | |
1486 | } | |
1487 | ||
9ee27d73 | 1488 | static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos, |
96adc5c7 AW |
1489 | uint16_t val, uint16_t mask) |
1490 | { | |
1491 | vfio_set_word_bits(vdev->pdev.config + pos, val, mask); | |
1492 | vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask); | |
1493 | vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask); | |
1494 | } | |
1495 | ||
1496 | static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask) | |
1497 | { | |
1498 | pci_set_long(buf, (pci_get_long(buf) & ~mask) | val); | |
1499 | } | |
1500 | ||
9ee27d73 | 1501 | static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos, |
96adc5c7 AW |
1502 | uint32_t val, uint32_t mask) |
1503 | { | |
1504 | vfio_set_long_bits(vdev->pdev.config + pos, val, mask); | |
1505 | vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask); | |
1506 | vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask); | |
1507 | } | |
1508 | ||
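A minimal standalone sketch of the three-buffer model these helpers maintain (editor's illustration, not part of this file; the local get_word/set_word helpers stand in for QEMU's little-endian pci_get_word/pci_set_word, and the 0x1234 value is made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t get_word(const uint8_t *buf)
{
    return buf[0] | (buf[1] << 8);
}

static void set_word(uint8_t *buf, uint16_t v)
{
    buf[0] = v & 0xff;
    buf[1] = v >> 8;
}

static void set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    set_word(buf, (get_word(buf) & ~mask) | val);
}

int main(void)
{
    uint8_t config[2], wmask[2], emulated[2];
    uint16_t val = 0x1234, mask = 0xffff;

    memset(config, 0x00, sizeof(config));
    memset(wmask, 0xff, sizeof(wmask));    /* start fully guest-writable    */
    memset(emulated, 0x00, sizeof(emulated));

    /* Equivalent of vfio_add_emulated_word(vdev, pos, 0x1234, ~0): */
    set_word_bits(config, val, mask);      /* value the guest will read     */
    set_word_bits(wmask, ~mask, mask);     /* masked bits become read-only  */
    set_word_bits(emulated, mask, mask);   /* reads served from QEMU's copy */

    /* Prints config=0x1234 wmask=0x0000 emulated=0xffff */
    printf("config=0x%04x wmask=0x%04x emulated=0x%04x\n",
           get_word(config), get_word(wmask), get_word(emulated));
    return 0;
}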
9ee27d73 | 1509 | static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size) |
96adc5c7 AW |
1510 | { |
1511 | uint16_t flags; | |
1512 | uint8_t type; | |
1513 | ||
1514 | flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS); | |
1515 | type = (flags & PCI_EXP_FLAGS_TYPE) >> 4; | |
1516 | ||
1517 | if (type != PCI_EXP_TYPE_ENDPOINT && | |
1518 | type != PCI_EXP_TYPE_LEG_END && | |
1519 | type != PCI_EXP_TYPE_RC_END) { | |
1520 | ||
1521 | error_report("vfio: Assignment of PCIe type 0x%x " | |
1522 | "devices is not currently supported", type); | |
1523 | return -EINVAL; | |
1524 | } | |
1525 | ||
1526 | if (!pci_bus_is_express(vdev->pdev.bus)) { | |
1527 | /* | |
1528 | * Use express capability as-is on PCI bus. It doesn't make much | |
1529 | * sense to even expose it, but some drivers (e.g. tg3) depend on it | |
1530 | * and guests don't seem to be particular about it. We'll need | |
1531 | * to revisit this or force express devices to express buses if we | |
1532 | * ever expose an IOMMU to the guest. | |
1533 | */ | |
1534 | } else if (pci_bus_is_root(vdev->pdev.bus)) { | |
1535 | /* | |
1536 | * On a Root Complex bus Endpoints become Root Complex Integrated | |
1537 | * Endpoints, which changes the type and clears the LNK & LNK2 fields. | |
1538 | */ | |
1539 | if (type == PCI_EXP_TYPE_ENDPOINT) { | |
1540 | vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, | |
1541 | PCI_EXP_TYPE_RC_END << 4, | |
1542 | PCI_EXP_FLAGS_TYPE); | |
1543 | ||
1544 | /* Link Capabilities, Status, and Control go away */ | |
1545 | if (size > PCI_EXP_LNKCTL) { | |
1546 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0); | |
1547 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); | |
1548 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0); | |
1549 | ||
1550 | #ifndef PCI_EXP_LNKCAP2 | |
1551 | #define PCI_EXP_LNKCAP2 44 | |
1552 | #endif | |
1553 | #ifndef PCI_EXP_LNKSTA2 | |
1554 | #define PCI_EXP_LNKSTA2 50 | |
1555 | #endif | |
1556 | /* Link 2 Capabilities, Status, and Control go away */ | |
1557 | if (size > PCI_EXP_LNKCAP2) { | |
1558 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0); | |
1559 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0); | |
1560 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0); | |
1561 | } | |
1562 | } | |
1563 | ||
1564 | } else if (type == PCI_EXP_TYPE_LEG_END) { | |
1565 | /* | |
1566 | * Legacy endpoints don't belong on the root complex. Windows | |
1567 | * seems to be happier with devices if we skip the capability. | |
1568 | */ | |
1569 | return 0; | |
1570 | } | |
1571 | ||
1572 | } else { | |
1573 | /* | |
1574 | * Convert Root Complex Integrated Endpoints to regular endpoints. | |
1575 | * These devices don't support LNK/LNK2 capabilities, so make them up. | |
1576 | */ | |
1577 | if (type == PCI_EXP_TYPE_RC_END) { | |
1578 | vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS, | |
1579 | PCI_EXP_TYPE_ENDPOINT << 4, | |
1580 | PCI_EXP_FLAGS_TYPE); | |
1581 | vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, | |
1582 | PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0); | |
1583 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0); | |
1584 | } | |
1585 | ||
1586 | /* Mark the Link Status bits as emulated to allow virtual negotiation */ | |
1587 | vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, | |
1588 | pci_get_word(vdev->pdev.config + pos + | |
1589 | PCI_EXP_LNKSTA), | |
1590 | PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS); | |
1591 | } | |
1592 | ||
1593 | pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size); | |
1594 | if (pos >= 0) { | |
1595 | vdev->pdev.exp.exp_cap = pos; | |
1596 | } | |
1597 | ||
1598 | return pos; | |
1599 | } | |
1600 | ||
9ee27d73 | 1601 | static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos) |
befe5176 AW |
1602 | { |
1603 | uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP); | |
1604 | ||
1605 | if (cap & PCI_EXP_DEVCAP_FLR) { | |
df92ee44 | 1606 | trace_vfio_check_pcie_flr(vdev->vbasedev.name); |
befe5176 AW |
1607 | vdev->has_flr = true; |
1608 | } | |
1609 | } | |
1610 | ||
9ee27d73 | 1611 | static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos) |
befe5176 AW |
1612 | { |
1613 | uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL); | |
1614 | ||
1615 | if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) { | |
df92ee44 | 1616 | trace_vfio_check_pm_reset(vdev->vbasedev.name); |
befe5176 AW |
1617 | vdev->has_pm_reset = true; |
1618 | } | |
1619 | } | |
1620 | ||
9ee27d73 | 1621 | static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos) |
befe5176 AW |
1622 | { |
1623 | uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP); | |
1624 | ||
1625 | if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) { | |
df92ee44 | 1626 | trace_vfio_check_af_flr(vdev->vbasedev.name); |
befe5176 AW |
1627 | vdev->has_flr = true; |
1628 | } | |
1629 | } | |
1630 | ||
9ee27d73 | 1631 | static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos) |
65501a74 AW |
1632 | { |
1633 | PCIDevice *pdev = &vdev->pdev; | |
1634 | uint8_t cap_id, next, size; | |
1635 | int ret; | |
1636 | ||
1637 | cap_id = pdev->config[pos]; | |
1638 | next = pdev->config[pos + 1]; | |
1639 | ||
1640 | /* | |
1641 | * If it becomes important to configure capabilities to their actual | |
1642 | * size, use this as the default when it's something we don't recognize. | |
1643 | * Since QEMU doesn't actually handle many of the config accesses, | |
1644 | * tracking the exact size doesn't seem worthwhile. | |
1645 | */ | |
1646 | size = vfio_std_cap_max_size(pdev, pos); | |
1647 | ||
1648 | /* | |
1649 | * pci_add_capability always inserts the new capability at the head | |
1650 | * of the chain. Therefore to end up with a chain that matches the | |
1651 | * physical device, we insert from the end by making this recursive. | |
1652 | * This is also why we pre-calculate size above as cached config space | |
1653 | * will be changed as we unwind the stack. | |
1654 | */ | |
1655 | if (next) { | |
1656 | ret = vfio_add_std_cap(vdev, next); | |
1657 | if (ret) { | |
1658 | return ret; | |
1659 | } | |
1660 | } else { | |
96adc5c7 AW |
1661 | /* Begin the rebuild, use QEMU emulated list bits */ |
1662 | pdev->config[PCI_CAPABILITY_LIST] = 0; | |
1663 | vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff; | |
1664 | vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST; | |
65501a74 AW |
1665 | } |
1666 | ||
96adc5c7 AW |
1667 | /* Use emulated next pointer to allow dropping caps */ |
1668 | pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff); | |
1669 | ||
65501a74 AW |
1670 | switch (cap_id) { |
1671 | case PCI_CAP_ID_MSI: | |
0de70dc7 | 1672 | ret = vfio_msi_setup(vdev, pos); |
65501a74 | 1673 | break; |
96adc5c7 | 1674 | case PCI_CAP_ID_EXP: |
befe5176 | 1675 | vfio_check_pcie_flr(vdev, pos); |
96adc5c7 AW |
1676 | ret = vfio_setup_pcie_cap(vdev, pos, size); |
1677 | break; | |
65501a74 | 1678 | case PCI_CAP_ID_MSIX: |
0de70dc7 | 1679 | ret = vfio_msix_setup(vdev, pos); |
65501a74 | 1680 | break; |
ba661818 | 1681 | case PCI_CAP_ID_PM: |
befe5176 | 1682 | vfio_check_pm_reset(vdev, pos); |
ba661818 | 1683 | vdev->pm_cap = pos; |
befe5176 AW |
1684 | ret = pci_add_capability(pdev, cap_id, pos, size); |
1685 | break; | |
1686 | case PCI_CAP_ID_AF: | |
1687 | vfio_check_af_flr(vdev, pos); | |
1688 | ret = pci_add_capability(pdev, cap_id, pos, size); | |
1689 | break; | |
65501a74 AW |
1690 | default: |
1691 | ret = pci_add_capability(pdev, cap_id, pos, size); | |
1692 | break; | |
1693 | } | |
1694 | ||
1695 | if (ret < 0) { | |
1696 | error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability " | |
312fd5f2 | 1697 | "0x%x[0x%x]@0x%x: %d", vdev->host.domain, |
65501a74 AW |
1698 | vdev->host.bus, vdev->host.slot, vdev->host.function, |
1699 | cap_id, size, pos, ret); | |
1700 | return ret; | |
1701 | } | |
1702 | ||
1703 | return 0; | |
1704 | } | |
1705 | ||
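A minimal standalone sketch of why the recursion above unwinds from the tail of the physical chain (editor's illustration, not part of this file): head insertion reverses order, so adding capabilities in unwind order rebuilds the original chain.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t caps[] = { 0x60, 0x50, 0x40 }; /* recursion-unwind order           */
    uint8_t next[256] = { 0 };
    uint8_t head = 0x00;                   /* emulated PCI_CAPABILITY_LIST     */
    unsigned i;
    uint8_t pos;

    for (i = 0; i < sizeof(caps); i++) {
        next[caps[i]] = head;              /* pci_add_capability() inserts at the head */
        head = caps[i];
    }

    for (pos = head; pos; pos = next[pos]) {
        printf("cap @ 0x%02x\n", pos);     /* prints 0x40, 0x50, 0x60 */
    }
    return 0;
}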
9ee27d73 | 1706 | static int vfio_add_capabilities(VFIOPCIDevice *vdev) |
65501a74 AW |
1707 | { |
1708 | PCIDevice *pdev = &vdev->pdev; | |
1709 | ||
1710 | if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) || | |
1711 | !pdev->config[PCI_CAPABILITY_LIST]) { | |
1712 | return 0; /* Nothing to add */ | |
1713 | } | |
1714 | ||
1715 | return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]); | |
1716 | } | |
1717 | ||
9ee27d73 | 1718 | static void vfio_pci_pre_reset(VFIOPCIDevice *vdev) |
f16f39c3 AW |
1719 | { |
1720 | PCIDevice *pdev = &vdev->pdev; | |
1721 | uint16_t cmd; | |
1722 | ||
1723 | vfio_disable_interrupts(vdev); | |
1724 | ||
1725 | /* Make sure the device is in D0 */ | |
1726 | if (vdev->pm_cap) { | |
1727 | uint16_t pmcsr; | |
1728 | uint8_t state; | |
1729 | ||
1730 | pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); | |
1731 | state = pmcsr & PCI_PM_CTRL_STATE_MASK; | |
1732 | if (state) { | |
1733 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | |
1734 | vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2); | |
1735 | /* vfio handles the necessary delay here */ | |
1736 | pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2); | |
1737 | state = pmcsr & PCI_PM_CTRL_STATE_MASK; | |
1738 | if (state) { | |
4e505ddd | 1739 | error_report("vfio: Unable to power on device, stuck in D%d", |
f16f39c3 AW |
1740 | state); |
1741 | } | |
1742 | } | |
1743 | } | |
1744 | ||
1745 | /* | |
1746 | * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master. | |
1747 | * Also put INTx Disable in a known state. | |
1748 | */ | |
1749 | cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2); | |
1750 | cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | | |
1751 | PCI_COMMAND_INTX_DISABLE); | |
1752 | vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2); | |
1753 | } | |
1754 | ||
9ee27d73 | 1755 | static void vfio_pci_post_reset(VFIOPCIDevice *vdev) |
f16f39c3 | 1756 | { |
870cb6f1 | 1757 | vfio_intx_enable(vdev); |
f16f39c3 AW |
1758 | } |
1759 | ||
1760 | static bool vfio_pci_host_match(PCIHostDeviceAddress *host1, | |
1761 | PCIHostDeviceAddress *host2) | |
1762 | { | |
1763 | return (host1->domain == host2->domain && host1->bus == host2->bus && | |
1764 | host1->slot == host2->slot && host1->function == host2->function); | |
1765 | } | |
1766 | ||
9ee27d73 | 1767 | static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) |
f16f39c3 AW |
1768 | { |
1769 | VFIOGroup *group; | |
1770 | struct vfio_pci_hot_reset_info *info; | |
1771 | struct vfio_pci_dependent_device *devices; | |
1772 | struct vfio_pci_hot_reset *reset; | |
1773 | int32_t *fds; | |
1774 | int ret, i, count; | |
1775 | bool multi = false; | |
1776 | ||
df92ee44 | 1777 | trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); |
f16f39c3 AW |
1778 | |
1779 | vfio_pci_pre_reset(vdev); | |
b47d8efa | 1780 | vdev->vbasedev.needs_reset = false; |
f16f39c3 AW |
1781 | |
1782 | info = g_malloc0(sizeof(*info)); | |
1783 | info->argsz = sizeof(*info); | |
1784 | ||
5546a621 | 1785 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); |
f16f39c3 AW |
1786 | if (ret && errno != ENOSPC) { |
1787 | ret = -errno; | |
1788 | if (!vdev->has_pm_reset) { | |
1789 | error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, " | |
1790 | "no available reset mechanism.", vdev->host.domain, | |
1791 | vdev->host.bus, vdev->host.slot, vdev->host.function); | |
1792 | } | |
1793 | goto out_single; | |
1794 | } | |
1795 | ||
1796 | count = info->count; | |
1797 | info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices))); | |
1798 | info->argsz = sizeof(*info) + (count * sizeof(*devices)); | |
1799 | devices = &info->devices[0]; | |
1800 | ||
5546a621 | 1801 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); |
f16f39c3 AW |
1802 | if (ret) { |
1803 | ret = -errno; | |
1804 | error_report("vfio: hot reset info failed: %m"); | |
1805 | goto out_single; | |
1806 | } | |
1807 | ||
df92ee44 | 1808 | trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); |
f16f39c3 AW |
1809 | |
1810 | /* Verify that we have all the groups required */ | |
1811 | for (i = 0; i < info->count; i++) { | |
1812 | PCIHostDeviceAddress host; | |
9ee27d73 | 1813 | VFIOPCIDevice *tmp; |
b47d8efa | 1814 | VFIODevice *vbasedev_iter; |
f16f39c3 AW |
1815 | |
1816 | host.domain = devices[i].segment; | |
1817 | host.bus = devices[i].bus; | |
1818 | host.slot = PCI_SLOT(devices[i].devfn); | |
1819 | host.function = PCI_FUNC(devices[i].devfn); | |
1820 | ||
385f57cf | 1821 | trace_vfio_pci_hot_reset_dep_devices(host.domain, |
f16f39c3 AW |
1822 | host.bus, host.slot, host.function, devices[i].group_id); |
1823 | ||
1824 | if (vfio_pci_host_match(&host, &vdev->host)) { | |
1825 | continue; | |
1826 | } | |
1827 | ||
62356b72 | 1828 | QLIST_FOREACH(group, &vfio_group_list, next) { |
f16f39c3 AW |
1829 | if (group->groupid == devices[i].group_id) { |
1830 | break; | |
1831 | } | |
1832 | } | |
1833 | ||
1834 | if (!group) { | |
1835 | if (!vdev->has_pm_reset) { | |
df92ee44 | 1836 | error_report("vfio: Cannot reset device %s, " |
f16f39c3 | 1837 | "depends on group %d which is not owned.", |
df92ee44 | 1838 | vdev->vbasedev.name, devices[i].group_id); |
f16f39c3 AW |
1839 | } |
1840 | ret = -EPERM; | |
1841 | goto out; | |
1842 | } | |
1843 | ||
1844 | /* Prep dependent devices for reset and clear our marker. */ | |
b47d8efa EA |
1845 | QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { |
1846 | if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { | |
1847 | continue; | |
1848 | } | |
1849 | tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); | |
f16f39c3 AW |
1850 | if (vfio_pci_host_match(&host, &tmp->host)) { |
1851 | if (single) { | |
f16f39c3 AW |
1852 | ret = -EINVAL; |
1853 | goto out_single; | |
1854 | } | |
1855 | vfio_pci_pre_reset(tmp); | |
b47d8efa | 1856 | tmp->vbasedev.needs_reset = false; |
f16f39c3 AW |
1857 | multi = true; |
1858 | break; | |
1859 | } | |
1860 | } | |
1861 | } | |
1862 | ||
1863 | if (!single && !multi) { | |
f16f39c3 AW |
1864 | ret = -EINVAL; |
1865 | goto out_single; | |
1866 | } | |
1867 | ||
1868 | /* Determine how many group fds need to be passed */ | |
1869 | count = 0; | |
62356b72 | 1870 | QLIST_FOREACH(group, &vfio_group_list, next) { |
f16f39c3 AW |
1871 | for (i = 0; i < info->count; i++) { |
1872 | if (group->groupid == devices[i].group_id) { | |
1873 | count++; | |
1874 | break; | |
1875 | } | |
1876 | } | |
1877 | } | |
1878 | ||
1879 | reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); | |
1880 | reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); | |
1881 | fds = &reset->group_fds[0]; | |
1882 | ||
1883 | /* Fill in group fds */ | |
62356b72 | 1884 | QLIST_FOREACH(group, &vfio_group_list, next) { |
f16f39c3 AW |
1885 | for (i = 0; i < info->count; i++) { |
1886 | if (group->groupid == devices[i].group_id) { | |
1887 | fds[reset->count++] = group->fd; | |
1888 | break; | |
1889 | } | |
1890 | } | |
1891 | } | |
1892 | ||
1893 | /* Bus reset! */ | |
5546a621 | 1894 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); |
f16f39c3 AW |
1895 | g_free(reset); |
1896 | ||
df92ee44 | 1897 | trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, |
385f57cf | 1898 | ret ? "%m" : "Success"); |
f16f39c3 AW |
1899 | |
1900 | out: | |
1901 | /* Re-enable INTx on affected devices */ | |
1902 | for (i = 0; i < info->count; i++) { | |
1903 | PCIHostDeviceAddress host; | |
9ee27d73 | 1904 | VFIOPCIDevice *tmp; |
b47d8efa | 1905 | VFIODevice *vbasedev_iter; |
f16f39c3 AW |
1906 | |
1907 | host.domain = devices[i].segment; | |
1908 | host.bus = devices[i].bus; | |
1909 | host.slot = PCI_SLOT(devices[i].devfn); | |
1910 | host.function = PCI_FUNC(devices[i].devfn); | |
1911 | ||
1912 | if (vfio_pci_host_match(&host, &vdev->host)) { | |
1913 | continue; | |
1914 | } | |
1915 | ||
62356b72 | 1916 | QLIST_FOREACH(group, &vfio_group_list, next) { |
f16f39c3 AW |
1917 | if (group->groupid == devices[i].group_id) { |
1918 | break; | |
1919 | } | |
1920 | } | |
1921 | ||
1922 | if (!group) { | |
1923 | break; | |
1924 | } | |
1925 | ||
b47d8efa EA |
1926 | QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { |
1927 | if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { | |
1928 | continue; | |
1929 | } | |
1930 | tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); | |
f16f39c3 AW |
1931 | if (vfio_pci_host_match(&host, &tmp->host)) { |
1932 | vfio_pci_post_reset(tmp); | |
1933 | break; | |
1934 | } | |
1935 | } | |
1936 | } | |
1937 | out_single: | |
1938 | vfio_pci_post_reset(vdev); | |
1939 | g_free(info); | |
1940 | ||
1941 | return ret; | |
1942 | } | |
1943 | ||
1944 | /* | |
1945 | * We want to differentiate hot reset of multiple in-use devices vs hot reset | |
1946 | * of a single in-use device. VFIO_DEVICE_RESET will already handle the case | |
1947 | * of doing hot resets when there is only a single device per bus. The in-use | |
1948 | * here refers to how many VFIODevices are affected. A hot reset that affects | |
1949 | * multiple devices, but only a single in-use device, means that we can call | |
1950 | * it from our bus ->reset() callback since the extent is effectively a single | |
1951 | * device. This allows us to make use of it in the hotplug path. When there | |
1952 | * are multiple in-use devices, we can only trigger the hot reset during a | |
1953 | * system reset and thus from our reset handler. We separate _one vs _multi | |
1954 | * here so that we don't overlap and do a double reset on the system reset | |
1955 | * path where both our reset handler and ->reset() callback are used. Calling | |
1956 | * _one() will only do a hot reset for the single in-use device case; calling | |
1957 | * _multi() will do nothing if a _one() would have been sufficient. | |
1958 | */ | |
9ee27d73 | 1959 | static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev) |
f16f39c3 AW |
1960 | { |
1961 | return vfio_pci_hot_reset(vdev, true); | |
1962 | } | |
1963 | ||
b47d8efa | 1964 | static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev) |
f16f39c3 | 1965 | { |
b47d8efa | 1966 | VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); |
f16f39c3 AW |
1967 | return vfio_pci_hot_reset(vdev, false); |
1968 | } | |
1969 | ||
b47d8efa EA |
1970 | static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev) |
1971 | { | |
1972 | VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); | |
1973 | if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) { | |
1974 | vbasedev->needs_reset = true; | |
1975 | } | |
1976 | } | |
1977 | ||
1978 | static VFIODeviceOps vfio_pci_ops = { | |
1979 | .vfio_compute_needs_reset = vfio_pci_compute_needs_reset, | |
1980 | .vfio_hot_reset_multi = vfio_pci_hot_reset_multi, | |
870cb6f1 | 1981 | .vfio_eoi = vfio_intx_eoi, |
b47d8efa EA |
1982 | }; |
1983 | ||
217e9fdc | 1984 | static int vfio_populate_device(VFIOPCIDevice *vdev) |
65501a74 | 1985 | { |
217e9fdc | 1986 | VFIODevice *vbasedev = &vdev->vbasedev; |
65501a74 | 1987 | struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) }; |
7b4b0e9e | 1988 | struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) }; |
d13dd2d7 | 1989 | int i, ret = -1; |
65501a74 AW |
1990 | |
1991 | /* Sanity check device */ | |
d13dd2d7 | 1992 | if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) { |
312fd5f2 | 1993 | error_report("vfio: Um, this isn't a PCI device"); |
65501a74 AW |
1994 | goto error; |
1995 | } | |
1996 | ||
d13dd2d7 | 1997 | if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) { |
312fd5f2 | 1998 | error_report("vfio: unexpected number of io regions %u", |
d13dd2d7 | 1999 | vbasedev->num_regions); |
65501a74 AW |
2000 | goto error; |
2001 | } | |
2002 | ||
d13dd2d7 EA |
2003 | if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) { |
2004 | error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs); | |
65501a74 AW |
2005 | goto error; |
2006 | } | |
2007 | ||
2008 | for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) { | |
2009 | reg_info.index = i; | |
2010 | ||
d13dd2d7 | 2011 | ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); |
65501a74 | 2012 | if (ret) { |
312fd5f2 | 2013 | error_report("vfio: Error getting region %d info: %m", i); |
65501a74 AW |
2014 | goto error; |
2015 | } | |
2016 | ||
d13dd2d7 EA |
2017 | trace_vfio_populate_device_region(vbasedev->name, i, |
2018 | (unsigned long)reg_info.size, | |
2019 | (unsigned long)reg_info.offset, | |
2020 | (unsigned long)reg_info.flags); | |
65501a74 | 2021 | |
d13dd2d7 | 2022 | vdev->bars[i].region.vbasedev = vbasedev; |
a664477d EA |
2023 | vdev->bars[i].region.flags = reg_info.flags; |
2024 | vdev->bars[i].region.size = reg_info.size; | |
2025 | vdev->bars[i].region.fd_offset = reg_info.offset; | |
2026 | vdev->bars[i].region.nr = i; | |
7076eabc | 2027 | QLIST_INIT(&vdev->bars[i].quirks); |
65501a74 AW |
2028 | } |
2029 | ||
65501a74 AW |
2030 | reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX; |
2031 | ||
5546a621 | 2032 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, ®_info); |
65501a74 | 2033 | if (ret) { |
312fd5f2 | 2034 | error_report("vfio: Error getting config info: %m"); |
65501a74 AW |
2035 | goto error; |
2036 | } | |
2037 | ||
d13dd2d7 EA |
2038 | trace_vfio_populate_device_config(vdev->vbasedev.name, |
2039 | (unsigned long)reg_info.size, | |
2040 | (unsigned long)reg_info.offset, | |
2041 | (unsigned long)reg_info.flags); | |
65501a74 AW |
2042 | |
2043 | vdev->config_size = reg_info.size; | |
6a659bbf AW |
2044 | if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) { |
2045 | vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS; | |
2046 | } | |
65501a74 AW |
2047 | vdev->config_offset = reg_info.offset; |
2048 | ||
f15689c7 | 2049 | if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) && |
d13dd2d7 | 2050 | vbasedev->num_regions > VFIO_PCI_VGA_REGION_INDEX) { |
f15689c7 AW |
2051 | struct vfio_region_info vga_info = { |
2052 | .argsz = sizeof(vga_info), | |
2053 | .index = VFIO_PCI_VGA_REGION_INDEX, | |
2054 | }; | |
2055 | ||
5546a621 | 2056 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info); |
f15689c7 AW |
2057 | if (ret) { |
2058 | error_report( | |
2059 | "vfio: Device does not support requested feature x-vga"); | |
2060 | goto error; | |
2061 | } | |
2062 | ||
2063 | if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) || | |
2064 | !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) || | |
2065 | vga_info.size < 0xbffff + 1) { | |
2066 | error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx", | |
2067 | (unsigned long)vga_info.flags, | |
2068 | (unsigned long)vga_info.size); | |
2069 | goto error; | |
2070 | } | |
2071 | ||
2072 | vdev->vga.fd_offset = vga_info.offset; | |
5546a621 | 2073 | vdev->vga.fd = vdev->vbasedev.fd; |
f15689c7 AW |
2074 | |
2075 | vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE; | |
2076 | vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM; | |
7076eabc | 2077 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks); |
f15689c7 AW |
2078 | |
2079 | vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE; | |
2080 | vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO; | |
7076eabc | 2081 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks); |
f15689c7 AW |
2082 | |
2083 | vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE; | |
2084 | vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI; | |
7076eabc | 2085 | QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks); |
f15689c7 AW |
2086 | |
2087 | vdev->has_vga = true; | |
2088 | } | |
47cbe50c | 2089 | |
7b4b0e9e VMP |
2090 | irq_info.index = VFIO_PCI_ERR_IRQ_INDEX; |
2091 | ||
5546a621 | 2092 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); |
7b4b0e9e VMP |
2093 | if (ret) { |
2094 | /* This can fail for an old kernel or legacy PCI dev */ | |
d13dd2d7 | 2095 | trace_vfio_populate_device_get_irq_info_failure(); |
7b4b0e9e VMP |
2096 | ret = 0; |
2097 | } else if (irq_info.count == 1) { | |
2098 | vdev->pci_aer = true; | |
2099 | } else { | |
df92ee44 | 2100 | error_report("vfio: %s " |
8fbf47c3 | 2101 | "Could not enable error recovery for the device", |
df92ee44 | 2102 | vbasedev->name); |
7b4b0e9e | 2103 | } |
f15689c7 | 2104 | |
d13dd2d7 EA |
2105 | error: |
2106 | return ret; | |
2107 | } | |
2108 | ||
9ee27d73 | 2109 | static void vfio_put_device(VFIOPCIDevice *vdev) |
65501a74 | 2110 | { |
462037c9 | 2111 | g_free(vdev->vbasedev.name); |
65501a74 | 2112 | if (vdev->msix) { |
3a4dbe6a | 2113 | object_unparent(OBJECT(&vdev->msix->mmap_mem)); |
65501a74 AW |
2114 | g_free(vdev->msix); |
2115 | vdev->msix = NULL; | |
2116 | } | |
d13dd2d7 | 2117 | vfio_put_base_device(&vdev->vbasedev); |
65501a74 AW |
2118 | } |
2119 | ||
7b4b0e9e VMP |
2120 | static void vfio_err_notifier_handler(void *opaque) |
2121 | { | |
9ee27d73 | 2122 | VFIOPCIDevice *vdev = opaque; |
7b4b0e9e VMP |
2123 | |
2124 | if (!event_notifier_test_and_clear(&vdev->err_notifier)) { | |
2125 | return; | |
2126 | } | |
2127 | ||
2128 | /* | |
2129 | * TBD. Retrieve the error details and decide what action | |
2130 | * needs to be taken. One of the actions could be to pass | |
2131 | * the error to the guest and have the guest driver recover | |
2132 | * from the error. This requires that PCIe capabilities be | |
2133 | * exposed to the guest. For now, we just terminate the | |
2134 | * guest to contain the error. | |
2135 | */ | |
2136 | ||
8fbf47c3 AW |
2137 | error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. " |
2138 | "Please collect any data possible and then kill the guest", | |
2139 | __func__, vdev->host.domain, vdev->host.bus, | |
2140 | vdev->host.slot, vdev->host.function); | |
7b4b0e9e | 2141 | |
ba29776f | 2142 | vm_stop(RUN_STATE_INTERNAL_ERROR); |
7b4b0e9e VMP |
2143 | } |
2144 | ||
2145 | /* | |
2146 | * Registers error notifier for devices supporting error recovery. | |
2147 | * If we encounter a failure in this function, we report an error | |
2148 | * and continue after disabling error recovery support for the | |
2149 | * device. | |
2150 | */ | |
9ee27d73 | 2151 | static void vfio_register_err_notifier(VFIOPCIDevice *vdev) |
7b4b0e9e VMP |
2152 | { |
2153 | int ret; | |
2154 | int argsz; | |
2155 | struct vfio_irq_set *irq_set; | |
2156 | int32_t *pfd; | |
2157 | ||
2158 | if (!vdev->pci_aer) { | |
2159 | return; | |
2160 | } | |
2161 | ||
2162 | if (event_notifier_init(&vdev->err_notifier, 0)) { | |
8fbf47c3 | 2163 | error_report("vfio: Unable to init event notifier for error detection"); |
7b4b0e9e VMP |
2164 | vdev->pci_aer = false; |
2165 | return; | |
2166 | } | |
2167 | ||
2168 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
2169 | ||
2170 | irq_set = g_malloc0(argsz); | |
2171 | irq_set->argsz = argsz; | |
2172 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
2173 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
2174 | irq_set->index = VFIO_PCI_ERR_IRQ_INDEX; | |
2175 | irq_set->start = 0; | |
2176 | irq_set->count = 1; | |
2177 | pfd = (int32_t *)&irq_set->data; | |
2178 | ||
2179 | *pfd = event_notifier_get_fd(&vdev->err_notifier); | |
2180 | qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev); | |
2181 | ||
5546a621 | 2182 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); |
7b4b0e9e | 2183 | if (ret) { |
8fbf47c3 | 2184 | error_report("vfio: Failed to set up error notification"); |
7b4b0e9e VMP |
2185 | qemu_set_fd_handler(*pfd, NULL, NULL, vdev); |
2186 | event_notifier_cleanup(&vdev->err_notifier); | |
2187 | vdev->pci_aer = false; | |
2188 | } | |
2189 | g_free(irq_set); | |
2190 | } | |
2191 | ||
9ee27d73 | 2192 | static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev) |
7b4b0e9e VMP |
2193 | { |
2194 | int argsz; | |
2195 | struct vfio_irq_set *irq_set; | |
2196 | int32_t *pfd; | |
2197 | int ret; | |
2198 | ||
2199 | if (!vdev->pci_aer) { | |
2200 | return; | |
2201 | } | |
2202 | ||
2203 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
2204 | ||
2205 | irq_set = g_malloc0(argsz); | |
2206 | irq_set->argsz = argsz; | |
2207 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
2208 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
2209 | irq_set->index = VFIO_PCI_ERR_IRQ_INDEX; | |
2210 | irq_set->start = 0; | |
2211 | irq_set->count = 1; | |
2212 | pfd = (int32_t *)&irq_set->data; | |
2213 | *pfd = -1; | |
2214 | ||
5546a621 | 2215 | ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); |
7b4b0e9e | 2216 | if (ret) { |
8fbf47c3 | 2217 | error_report("vfio: Failed to de-assign error fd: %m"); |
7b4b0e9e VMP |
2218 | } |
2219 | g_free(irq_set); | |
2220 | qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier), | |
2221 | NULL, NULL, vdev); | |
2222 | event_notifier_cleanup(&vdev->err_notifier); | |
2223 | } | |
2224 | ||
47cbe50c AW |
2225 | static void vfio_req_notifier_handler(void *opaque) |
2226 | { | |
2227 | VFIOPCIDevice *vdev = opaque; | |
2228 | ||
2229 | if (!event_notifier_test_and_clear(&vdev->req_notifier)) { | |
2230 | return; | |
2231 | } | |
2232 | ||
2233 | qdev_unplug(&vdev->pdev.qdev, NULL); | |
2234 | } | |
2235 | ||
2236 | static void vfio_register_req_notifier(VFIOPCIDevice *vdev) | |
2237 | { | |
2238 | struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info), | |
2239 | .index = VFIO_PCI_REQ_IRQ_INDEX }; | |
2240 | int argsz; | |
2241 | struct vfio_irq_set *irq_set; | |
2242 | int32_t *pfd; | |
2243 | ||
2244 | if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) { | |
2245 | return; | |
2246 | } | |
2247 | ||
2248 | if (ioctl(vdev->vbasedev.fd, | |
2249 | VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) { | |
2250 | return; | |
2251 | } | |
2252 | ||
2253 | if (event_notifier_init(&vdev->req_notifier, 0)) { | |
2254 | error_report("vfio: Unable to init event notifier for device request"); | |
2255 | return; | |
2256 | } | |
2257 | ||
2258 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
2259 | ||
2260 | irq_set = g_malloc0(argsz); | |
2261 | irq_set->argsz = argsz; | |
2262 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
2263 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
2264 | irq_set->index = VFIO_PCI_REQ_IRQ_INDEX; | |
2265 | irq_set->start = 0; | |
2266 | irq_set->count = 1; | |
2267 | pfd = (int32_t *)&irq_set->data; | |
2268 | ||
2269 | *pfd = event_notifier_get_fd(&vdev->req_notifier); | |
2270 | qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev); | |
2271 | ||
2272 | if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) { | |
2273 | error_report("vfio: Failed to set up device request notification"); | |
2274 | qemu_set_fd_handler(*pfd, NULL, NULL, vdev); | |
2275 | event_notifier_cleanup(&vdev->req_notifier); | |
2276 | } else { | |
2277 | vdev->req_enabled = true; | |
2278 | } | |
2279 | ||
2280 | g_free(irq_set); | |
2281 | } | |
2282 | ||
2283 | static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev) | |
2284 | { | |
2285 | int argsz; | |
2286 | struct vfio_irq_set *irq_set; | |
2287 | int32_t *pfd; | |
2288 | ||
2289 | if (!vdev->req_enabled) { | |
2290 | return; | |
2291 | } | |
2292 | ||
2293 | argsz = sizeof(*irq_set) + sizeof(*pfd); | |
2294 | ||
2295 | irq_set = g_malloc0(argsz); | |
2296 | irq_set->argsz = argsz; | |
2297 | irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | | |
2298 | VFIO_IRQ_SET_ACTION_TRIGGER; | |
2299 | irq_set->index = VFIO_PCI_REQ_IRQ_INDEX; | |
2300 | irq_set->start = 0; | |
2301 | irq_set->count = 1; | |
2302 | pfd = (int32_t *)&irq_set->data; | |
2303 | *pfd = -1; | |
2304 | ||
2305 | if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) { | |
2306 | error_report("vfio: Failed to de-assign device request fd: %m"); | |
2307 | } | |
2308 | g_free(irq_set); | |
2309 | qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier), | |
2310 | NULL, NULL, vdev); | |
2311 | event_notifier_cleanup(&vdev->req_notifier); | |
2312 | ||
2313 | vdev->req_enabled = false; | |
2314 | } | |
2315 | ||
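The DATA_EVENTFD/ACTION_TRIGGER packing above appears four times across the err and req notifier paths. A hedged sketch of how it could be factored into one helper (hypothetical function, not part of this file; plain calloc/free keep the sketch self-contained, and fd == -1 de-assigns the trigger as in the teardown paths):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int vfio_set_trigger_eventfd(int device_fd, uint32_t index, int32_t fd)
{
    size_t argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
    struct vfio_irq_set *irq_set = calloc(1, argsz);
    int ret;

    if (!irq_set) {
        return -ENOMEM;
    }

    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = index;                 /* e.g. VFIO_PCI_ERR_IRQ_INDEX  */
    irq_set->start = 0;
    irq_set->count = 1;
    memcpy(irq_set->data, &fd, sizeof(fd)); /* single eventfd payload       */

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
    ret = ret ? -errno : 0;
    free(irq_set);
    return ret;
}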
65501a74 AW |
2316 | static int vfio_initfn(PCIDevice *pdev) |
2317 | { | |
b47d8efa EA |
2318 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); |
2319 | VFIODevice *vbasedev_iter; | |
65501a74 AW |
2320 | VFIOGroup *group; |
2321 | char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name; | |
2322 | ssize_t len; | |
2323 | struct stat st; | |
2324 | int groupid; | |
2325 | int ret; | |
2326 | ||
2327 | /* Check that the host device exists */ | |
2328 | snprintf(path, sizeof(path), | |
2329 | "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/", | |
2330 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2331 | vdev->host.function); | |
2332 | if (stat(path, &st) < 0) { | |
312fd5f2 | 2333 | error_report("vfio: error: no such host device: %s", path); |
65501a74 AW |
2334 | return -errno; |
2335 | } | |
2336 | ||
b47d8efa EA |
2337 | vdev->vbasedev.ops = &vfio_pci_ops; |
2338 | ||
462037c9 EA |
2339 | vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI; |
2340 | vdev->vbasedev.name = g_strdup_printf("%04x:%02x:%02x.%01x", | |
2341 | vdev->host.domain, vdev->host.bus, | |
2342 | vdev->host.slot, vdev->host.function); | |
2343 | ||
65501a74 AW |
2344 | strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1); |
2345 | ||
13665a2d MA |
2346 | len = readlink(path, iommu_group_path, sizeof(path)); |
2347 | if (len <= 0 || len >= sizeof(path)) { | |
312fd5f2 | 2348 | error_report("vfio: error no iommu_group for device"); |
c6d231e2 | 2349 | return len < 0 ? -errno : -ENAMETOOLONG; |
65501a74 AW |
2350 | } |
2351 | ||
2352 | iommu_group_path[len] = 0; | |
2353 | group_name = basename(iommu_group_path); | |
2354 | ||
2355 | if (sscanf(group_name, "%d", &groupid) != 1) { | |
312fd5f2 | 2356 | error_report("vfio: error reading %s: %m", path); |
65501a74 AW |
2357 | return -errno; |
2358 | } | |
2359 | ||
df92ee44 | 2360 | trace_vfio_initfn(vdev->vbasedev.name, groupid); |
65501a74 | 2361 | |
0688448b | 2362 | group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev)); |
65501a74 | 2363 | if (!group) { |
312fd5f2 | 2364 | error_report("vfio: failed to get group %d", groupid); |
65501a74 AW |
2365 | return -ENOENT; |
2366 | } | |
2367 | ||
2368 | snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x", | |
2369 | vdev->host.domain, vdev->host.bus, vdev->host.slot, | |
2370 | vdev->host.function); | |
2371 | ||
b47d8efa EA |
2372 | QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { |
2373 | if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) { | |
312fd5f2 | 2374 | error_report("vfio: error: device %s is already attached", path); |
65501a74 AW |
2375 | vfio_put_group(group); |
2376 | return -EBUSY; | |
2377 | } | |
2378 | } | |
2379 | ||
d13dd2d7 | 2380 | ret = vfio_get_device(group, path, &vdev->vbasedev); |
65501a74 | 2381 | if (ret) { |
312fd5f2 | 2382 | error_report("vfio: failed to get device %s", path); |
65501a74 AW |
2383 | vfio_put_group(group); |
2384 | return ret; | |
2385 | } | |
2386 | ||
217e9fdc PB |
2387 | ret = vfio_populate_device(vdev); |
2388 | if (ret) { | |
77a10d04 | 2389 | return ret; |
217e9fdc PB |
2390 | } |
2391 | ||
65501a74 | 2392 | /* Get a copy of config space */ |
5546a621 | 2393 | ret = pread(vdev->vbasedev.fd, vdev->pdev.config, |
65501a74 AW |
2394 | MIN(pci_config_size(&vdev->pdev), vdev->config_size), |
2395 | vdev->config_offset); | |
2396 | if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) { | |
2397 | ret = ret < 0 ? -errno : -EFAULT; | |
312fd5f2 | 2398 | error_report("vfio: Failed to read device config space"); |
77a10d04 | 2399 | return ret; |
65501a74 AW |
2400 | } |
2401 | ||
4b5d5e87 AW |
2402 | /* vfio emulates a lot for us, but some bits need extra love */ |
2403 | vdev->emulated_config_bits = g_malloc0(vdev->config_size); | |
2404 | ||
2405 | /* QEMU can choose to expose the ROM or not */ | |
2406 | memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4); | |
2407 | ||
89dcccc5 AW |
2408 | /* |
2409 | * The PCI spec reserves vendor ID 0xffff as an invalid value. The | |
2410 | * device ID is managed by the vendor and need only be a 16-bit value. | |
2411 | * Allow any 16-bit value for subsystem so they can be hidden or changed. | |
2412 | */ | |
2413 | if (vdev->vendor_id != PCI_ANY_ID) { | |
2414 | if (vdev->vendor_id >= 0xffff) { | |
2415 | error_report("vfio: Invalid PCI vendor ID provided"); | |
2416 | return -EINVAL; | |
2417 | } | |
2418 | vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0); | |
2419 | trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id); | |
2420 | } else { | |
2421 | vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID); | |
2422 | } | |
2423 | ||
2424 | if (vdev->device_id != PCI_ANY_ID) { | |
2425 | if (vdev->device_id > 0xffff) { | |
2426 | error_report("vfio: Invalid PCI device ID provided"); | |
2427 | return -EINVAL; | |
2428 | } | |
2429 | vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0); | |
2430 | trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id); | |
2431 | } else { | |
2432 | vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID); | |
2433 | } | |
2434 | ||
2435 | if (vdev->sub_vendor_id != PCI_ANY_ID) { | |
2436 | if (vdev->sub_vendor_id > 0xffff) { | |
2437 | error_report("vfio: Invalid PCI subsystem vendor ID provided"); | |
2438 | return -EINVAL; | |
2439 | } | |
2440 | vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID, | |
2441 | vdev->sub_vendor_id, ~0); | |
2442 | trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name, | |
2443 | vdev->sub_vendor_id); | |
2444 | } | |
2445 | ||
2446 | if (vdev->sub_device_id != PCI_ANY_ID) { | |
2447 | if (vdev->sub_device_id > 0xffff) { | |
2448 | error_report("vfio: Invalid PCI subsystem device ID provided"); | |
2449 | return -EINVAL; | |
2450 | } | |
2451 | vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0); | |
2452 | trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name, | |
2453 | vdev->sub_device_id); | |
2454 | } | |
ff635e37 | 2455 | |
4b5d5e87 AW |
2456 | /* QEMU can change multi-function devices to single function, or reverse */ |
2457 | vdev->emulated_config_bits[PCI_HEADER_TYPE] = | |
2458 | PCI_HEADER_TYPE_MULTI_FUNCTION; | |
2459 | ||
187d6232 AW |
2460 | /* Restore or clear multifunction, this is always controlled by QEMU */ |
2461 | if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { | |
2462 | vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; | |
2463 | } else { | |
2464 | vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION; | |
2465 | } | |
2466 | ||
65501a74 AW |
2467 | /* |
2468 | * Clear host resource mapping info. If we choose not to register a | |
2469 | * BAR, such as might be the case with the option ROM, we can get | |
2470 | * confusing, unwritable, residual addresses from the host here. | |
2471 | */ | |
2472 | memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24); | |
2473 | memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4); | |
2474 | ||
6f864e6e | 2475 | vfio_pci_size_rom(vdev); |
65501a74 | 2476 | |
0de70dc7 | 2477 | ret = vfio_msix_early_setup(vdev); |
65501a74 | 2478 | if (ret) { |
77a10d04 | 2479 | return ret; |
65501a74 AW |
2480 | } |
2481 | ||
2482 | vfio_map_bars(vdev); | |
2483 | ||
2484 | ret = vfio_add_capabilities(vdev); | |
2485 | if (ret) { | |
2486 | goto out_teardown; | |
2487 | } | |
2488 | ||
4b5d5e87 AW |
2489 | /* QEMU emulates all of MSI & MSIX */ |
2490 | if (pdev->cap_present & QEMU_PCI_CAP_MSIX) { | |
2491 | memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff, | |
2492 | MSIX_CAP_LENGTH); | |
2493 | } | |
2494 | ||
2495 | if (pdev->cap_present & QEMU_PCI_CAP_MSI) { | |
2496 | memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff, | |
2497 | vdev->msi_cap_size); | |
2498 | } | |
2499 | ||
65501a74 | 2500 | if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) { |
bc72ad67 | 2501 | vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, |
ea486926 | 2502 | vfio_intx_mmap_enable, vdev); |
870cb6f1 AW |
2503 | pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update); |
2504 | ret = vfio_intx_enable(vdev); | |
65501a74 AW |
2505 | if (ret) { |
2506 | goto out_teardown; | |
2507 | } | |
2508 | } | |
2509 | ||
7b4b0e9e | 2510 | vfio_register_err_notifier(vdev); |
47cbe50c | 2511 | vfio_register_req_notifier(vdev); |
c9c50009 | 2512 | vfio_setup_resetfn_quirk(vdev); |
c29029dd | 2513 | |
65501a74 AW |
2514 | return 0; |
2515 | ||
2516 | out_teardown: | |
2517 | pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); | |
2518 | vfio_teardown_msi(vdev); | |
ba5e6bfa | 2519 | vfio_unregister_bars(vdev); |
77a10d04 PB |
2520 | return ret; |
2521 | } | |
2522 | ||
2523 | static void vfio_instance_finalize(Object *obj) | |
2524 | { | |
2525 | PCIDevice *pci_dev = PCI_DEVICE(obj); | |
2526 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev); | |
2527 | VFIOGroup *group = vdev->vbasedev.group; | |
2528 | ||
ba5e6bfa | 2529 | vfio_unmap_bars(vdev); |
4b5d5e87 | 2530 | g_free(vdev->emulated_config_bits); |
77a10d04 | 2531 | g_free(vdev->rom); |
65501a74 AW |
2532 | vfio_put_device(vdev); |
2533 | vfio_put_group(group); | |
65501a74 AW |
2534 | } |
2535 | ||
2536 | static void vfio_exitfn(PCIDevice *pdev) | |
2537 | { | |
9ee27d73 | 2538 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); |
65501a74 | 2539 | |
47cbe50c | 2540 | vfio_unregister_req_notifier(vdev); |
7b4b0e9e | 2541 | vfio_unregister_err_notifier(vdev); |
65501a74 AW |
2542 | pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); |
2543 | vfio_disable_interrupts(vdev); | |
ea486926 | 2544 | if (vdev->intx.mmap_timer) { |
bc72ad67 | 2545 | timer_free(vdev->intx.mmap_timer); |
ea486926 | 2546 | } |
65501a74 | 2547 | vfio_teardown_msi(vdev); |
ba5e6bfa | 2548 | vfio_unregister_bars(vdev); |
65501a74 AW |
2549 | } |
2550 | ||
2551 | static void vfio_pci_reset(DeviceState *dev) | |
2552 | { | |
2553 | PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev); | |
9ee27d73 | 2554 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); |
65501a74 | 2555 | |
df92ee44 | 2556 | trace_vfio_pci_reset(vdev->vbasedev.name); |
5834a83f | 2557 | |
f16f39c3 | 2558 | vfio_pci_pre_reset(vdev); |
ba661818 | 2559 | |
5655f931 AW |
2560 | if (vdev->resetfn && !vdev->resetfn(vdev)) { |
2561 | goto post_reset; | |
2562 | } | |
2563 | ||
b47d8efa EA |
2564 | if (vdev->vbasedev.reset_works && |
2565 | (vdev->has_flr || !vdev->has_pm_reset) && | |
5546a621 | 2566 | !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) { |
df92ee44 | 2567 | trace_vfio_pci_reset_flr(vdev->vbasedev.name); |
f16f39c3 | 2568 | goto post_reset; |
ba661818 AW |
2569 | } |
2570 | ||
f16f39c3 AW |
2571 | /* See if we can do our own bus reset */ |
2572 | if (!vfio_pci_hot_reset_one(vdev)) { | |
2573 | goto post_reset; | |
2574 | } | |
5834a83f | 2575 | |
f16f39c3 | 2576 | /* If nothing else works and the device supports PM reset, use it */ |
b47d8efa | 2577 | if (vdev->vbasedev.reset_works && vdev->has_pm_reset && |
5546a621 | 2578 | !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) { |
df92ee44 | 2579 | trace_vfio_pci_reset_pm(vdev->vbasedev.name); |
f16f39c3 | 2580 | goto post_reset; |
65501a74 | 2581 | } |
5834a83f | 2582 | |
f16f39c3 AW |
2583 | post_reset: |
2584 | vfio_pci_post_reset(vdev); | |
65501a74 AW |
2585 | } |
2586 | ||
abc5b3bf GA |
2587 | static void vfio_instance_init(Object *obj) |
2588 | { | |
2589 | PCIDevice *pci_dev = PCI_DEVICE(obj); | |
9ee27d73 | 2590 | VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj)); |
abc5b3bf GA |
2591 | |
2592 | device_add_bootindex_property(obj, &vdev->bootindex, | |
2593 | "bootindex", NULL, | |
2594 | &pci_dev->qdev, NULL); | |
2595 | } | |
2596 | ||
65501a74 | 2597 | static Property vfio_pci_dev_properties[] = { |
9ee27d73 EA |
2598 | DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host), |
2599 | DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice, | |
ea486926 | 2600 | intx.mmap_timeout, 1100), |
9ee27d73 | 2601 | DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features, |
f15689c7 | 2602 | VFIO_FEATURE_ENABLE_VGA_BIT, false), |
47cbe50c AW |
2603 | DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features, |
2604 | VFIO_FEATURE_ENABLE_REQ_BIT, true), | |
5e15d79b | 2605 | DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false), |
46746dba AW |
2606 | DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false), |
2607 | DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false), | |
2608 | DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false), | |
89dcccc5 AW |
2609 | DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID), |
2610 | DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID), | |
2611 | DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice, | |
2612 | sub_vendor_id, PCI_ANY_ID), | |
2613 | DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice, | |
2614 | sub_device_id, PCI_ANY_ID), | |
65501a74 AW |
2615 | /* |
2616 | * TODO - support passed fds... is this necessary? | |
9ee27d73 EA |
2617 | * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name), |
2618 | * DEFINE_PROP_STRING("vfiogroupfd", VFIOPCIDevice, vfiogroupfd_name), | |
65501a74 AW |
2619 | */ |
2620 | DEFINE_PROP_END_OF_LIST(), | |
2621 | }; | |
2622 | ||
d9f0e638 AW |
2623 | static const VMStateDescription vfio_pci_vmstate = { |
2624 | .name = "vfio-pci", | |
2625 | .unmigratable = 1, | |
2626 | }; | |
65501a74 AW |
2627 | |
2628 | static void vfio_pci_dev_class_init(ObjectClass *klass, void *data) | |
2629 | { | |
2630 | DeviceClass *dc = DEVICE_CLASS(klass); | |
2631 | PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); | |
2632 | ||
2633 | dc->reset = vfio_pci_reset; | |
2634 | dc->props = vfio_pci_dev_properties; | |
d9f0e638 AW |
2635 | dc->vmsd = &vfio_pci_vmstate; |
2636 | dc->desc = "VFIO-based PCI device assignment"; | |
125ee0ed | 2637 | set_bit(DEVICE_CATEGORY_MISC, dc->categories); |
65501a74 AW |
2638 | pdc->init = vfio_initfn; |
2639 | pdc->exit = vfio_exitfn; | |
2640 | pdc->config_read = vfio_pci_read_config; | |
2641 | pdc->config_write = vfio_pci_write_config; | |
6a659bbf | 2642 | pdc->is_express = 1; /* We might be */ |
65501a74 AW |
2643 | } |
2644 | ||
2645 | static const TypeInfo vfio_pci_dev_info = { | |
2646 | .name = "vfio-pci", | |
2647 | .parent = TYPE_PCI_DEVICE, | |
9ee27d73 | 2648 | .instance_size = sizeof(VFIOPCIDevice), |
65501a74 | 2649 | .class_init = vfio_pci_dev_class_init, |
abc5b3bf | 2650 | .instance_init = vfio_instance_init, |
77a10d04 | 2651 | .instance_finalize = vfio_instance_finalize, |
65501a74 AW |
2652 | }; |
2653 | ||
2654 | static void register_vfio_pci_dev_type(void) | |
2655 | { | |
2656 | type_register_static(&vfio_pci_dev_info); | |
2657 | } | |
2658 | ||
2659 | type_init(register_vfio_pci_dev_type) |