/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik ([email protected])
 *  Copyright (c) 2007, Neocleus, Guy Zana ([email protected])
 *  Copyright (C) 2008, Qumranet, Amit Shah ([email protected])
 *  Copyright (C) 2008, Red Hat, Amit Shah ([email protected])
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda ([email protected])
 */

#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci_bridge.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "pci.h"
#include "trace.h"
#include "qapi/error.h"

#define MSIX_CAP_LENGTH 12

static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);

/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead. We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead). If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance. This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps. Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode. We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled. Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}

static void vfio_intx_interrupt(void *opaque)
{
    VFIOPCIDevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}

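/*
 * Rough timeline of the mmap toggling implemented by the two functions
 * above (a descriptive sketch of this file's behavior, assuming
 * x-intx-mmap-timeout-ms is non-zero):
 *
 *   INTx fires         -> vfio_intx_interrupt(): disable mmaps, arm timer
 *   EOI, quiet period  -> timer -> vfio_intx_mmap_enable(): re-enable mmaps
 *   INTx still pending -> timer -> re-arm, stay in trapped (non-mmap) mode
 */
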
static void vfio_intx_eoi(VFIODevice *vbasedev)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!vdev->intx.pending) {
        return;
    }

    trace_vfio_intx_eoi(vbasedev->name);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
}

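/*
 * A sketch of the in-kernel INTx acceleration that vfio_intx_enable_kvm()
 * wires up below (all names come from this file, linux/vfio.h, and the KVM
 * irqfd API):
 *
 *   device INTx -> vdev->intx.interrupt eventfd -> KVM irqfd -> guest IRQ
 *   guest EOI -> KVM resamplefd (vdev->intx.unmask) -> VFIO unmasks INTx
 *
 * With this in place the level-triggered interrupt is re-enabled entirely
 * in the kernel; QEMU only falls back to vfio_intx_interrupt() handling
 * when irqfds or resamplefds are unavailable.
 */
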
static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_resamplefds_enabled()) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_setg(errp, "event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_setg_errno(errp, errno, "failed to setup resample irqfd");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_setg_errno(errp, -ret, "failed to setup INTx unmask fd");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    vdev->intx.kvm_accel = true;

    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
#endif
}

static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);

    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
#endif
}

static void vfio_intx_update(PCIDevice *pdev)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    PCIINTxRoute route;
    Error *err = NULL;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    trace_vfio_intx_update(vdev->vbasedev.name,
                           vdev->intx.route.irq, route.irq);

    vfio_intx_disable_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
    }

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_intx_eoi(&vdev->vbasedev);
}

static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    int ret, argsz;
    struct vfio_irq_set *irq_set;
    int32_t *pfd;
    Error *err = NULL;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_setg_errno(errp, -ret, "event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_setg_errno(errp, -ret, "failed to setup INTx fd");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        g_free(irq_set);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }
    g_free(irq_set);

    vfio_intx_enable_kvm(vdev, &err);
    if (err) {
        error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
    }

    vdev->interrupt = VFIO_INT_INTx;

    trace_vfio_intx_enable(vdev->vbasedev.name);

    return 0;
}

static void vfio_intx_disable(VFIOPCIDevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_intx_disable_kvm(vdev);
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    trace_vfio_intx_disable(vdev->vbasedev.name);
}

/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIOPCIDevice *vdev = vector->vdev;
    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
    void (*notify)(PCIDevice *dev, unsigned vector);
    MSIMessage msg;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    if (vdev->interrupt == VFIO_INT_MSIX) {
        get_msg = msix_get_message;
        notify = msix_notify;

        /* A masked vector firing needs to use the PBA, enable it */
        if (msix_is_masked(&vdev->pdev, nr)) {
            set_bit(nr, vdev->msix->pending);
            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
        }
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        get_msg = msi_get_message;
        notify = msi_notify;
    } else {
        abort();
    }

    msg = get_msg(&vdev->pdev, nr);
    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
    notify(&vdev->pdev, nr);
}

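/*
 * Layout of the VFIO_DEVICE_SET_IRQS argument built below (per linux/vfio.h;
 * the trailing flexible array is sized for one eventfd per vector):
 *
 *   struct vfio_irq_set {
 *       __u32 argsz;  // sizeof(header) + nr_vectors * sizeof(int32_t)
 *       __u32 flags;  // VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER
 *       __u32 index;  // VFIO_PCI_MSI_IRQ_INDEX or VFIO_PCI_MSIX_IRQ_INDEX
 *       __u32 start;  // first vector, 0 here
 *       __u32 count;  // number of vectors
 *       __u8  data[]; // int32_t eventfds; -1 for unused vectors, see the loop
 *   };
 */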
static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        int fd = -1;

        /*
         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
         * bits, therefore we always use the KVM signaling path when set up.
         * MSI-X mask and pending bits are emulated, so we want to use the
         * KVM signaling path only when configured and unmasked.
         */
        if (vdev->msi_vectors[i].use) {
            if (vdev->msi_vectors[i].virq < 0 ||
                (msix && msix_is_masked(&vdev->pdev, i))) {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
            } else {
                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
            }
        }

        fds[i] = fd;
    }

    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}

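/*
 * Sketch of the two signaling paths a vector can take, depending on whether
 * vfio_add_kvm_msi_virq() below succeeds:
 *
 *   bypass:   device -> kvm_interrupt eventfd -> KVM irqfd -> virq -> guest
 *   fallback: device -> interrupt eventfd -> vfio_msi_interrupt() -> guest
 *
 * vector->virq records which path is active (-1 means the QEMU fallback).
 */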
static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
                                  int vector_n, bool msix)
{
    int virq;

    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
        return;
    }

    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
        return;
    }

    virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
    if (virq < 0) {
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                           NULL, virq) < 0) {
        kvm_irqchip_release_virq(kvm_state, virq);
        event_notifier_cleanup(&vector->kvm_interrupt);
        return;
    }

    vector->virq = virq;
}

static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
{
    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
                                          vector->virq);
    kvm_irqchip_release_virq(kvm_state, vector->virq);
    vector->virq = -1;
    event_notifier_cleanup(&vector->kvm_interrupt);
}

static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
                                     PCIDevice *pdev)
{
    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
    kvm_irqchip_commit_routes(kvm_state);
}

static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);

    vector = &vdev->msi_vectors[nr];

    if (!vector->use) {
        vector->vdev = vdev;
        vector->virq = -1;
        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }
        vector->use = true;
        msix_vector_use(pdev, nr);
    }

    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                        handler, NULL, vector);

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    if (vector->virq >= 0) {
        if (!msg) {
            vfio_remove_kvm_msi_virq(vector);
        } else {
            vfio_update_kvm_msi_virq(vector, *msg, pdev);
        }
    } else {
        if (msg) {
            vfio_add_kvm_msi_virq(vdev, vector, nr, true);
        }
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut down and incrementally
     * increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        if (vector->virq >= 0) {
            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
        } else {
            *pfd = event_notifier_get_fd(&vector->interrupt);
        }

        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    /* Disable PBA emulation when nothing more is pending. */
    clear_bit(nr, vdev->msix->pending);
    if (find_first_bit(vdev->msix->pending,
                       vdev->nr_vectors) == vdev->nr_vectors) {
        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}

static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];

    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);

    /*
     * There are still old guests that mask and unmask vectors on every
     * interrupt. If we're using QEMU bypass with a KVM irqfd, leave all of
     * the KVM setup in place, simply switch VFIO to use the non-bypass
     * eventfd. We'll then fire the interrupt through QEMU and the MSI-X
     * core will mask the interrupt and set pending bits, allowing it to
     * be re-asserted on unmask. Nothing to do if already using QEMU mode.
     */
    if (vector->virq >= 0) {
        int argsz;
        struct vfio_irq_set *irq_set;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);

        g_free(irq_set);
    }
}

static void vfio_msix_enable(VFIOPCIDevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host. When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked. We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix(). This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    trace_vfio_msix_enable(vdev->vbasedev.name);
}

static void vfio_msi_enable(VFIOPCIDevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
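    /*
     * If the host can only back a subset of the requested vectors, the
     * VFIO_DEVICE_SET_IRQS call below reports the supported count; the
     * error path then trims nr_vectors and loops back to the label below
     * to retry with the smaller allocation.
     */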
retry:
    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->virq = -1;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            vfio_msi_interrupt, NULL, vector);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vfio_add_kvm_msi_virq(vdev, vector, i, false);
    }

    /* Set interrupt type prior to possible interrupts */
    vdev->interrupt = VFIO_INT_MSI;

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        /*
         * Failing to set up MSI doesn't really fall within any specification.
         * Let's try leaving interrupts disabled and hope the guest figures
         * out to fall back to INTx for this device.
         */
        error_report("vfio: Error: Failed to enable MSI");
        vdev->interrupt = VFIO_INT_NONE;

        return;
    }

    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
}

static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
{
    Error *err = NULL;
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        if (vdev->msi_vectors[i].use) {
            if (vector->virq >= 0) {
                vfio_remove_kvm_msi_virq(vector);
            }
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
            event_notifier_cleanup(&vector->interrupt);
        }
    }

    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    vfio_intx_enable(vdev, &err);
    if (err) {
        error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
    }
}

static void vfio_msix_disable(VFIOPCIDevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
            msix_vector_unuse(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_msi_disable_common(vdev);

    memset(vdev->msix->pending, 0,
           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));

    trace_vfio_msix_disable(vdev->vbasedev.name);
}

static void vfio_msi_disable(VFIOPCIDevice *vdev)
{
    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
    vfio_msi_disable_common(vdev);

    trace_vfio_msi_disable(vdev->vbasedev.name);
}

static void vfio_update_msi(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);
        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
    }
}

static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
{
    struct vfio_region_info *reg_info;
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (vfio_get_region_info(&vdev->vbasedev,
                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
                            (unsigned long)reg_info->offset,
                            (unsigned long)reg_info->flags);

    vdev->rom_size = size = reg_info->size;
    vdev->rom_offset = reg_info->offset;

    g_free(reg_info);

    if (!vdev->rom_size) {
        vdev->rom_read_failed = true;
        error_report("vfio-pci: Cannot read device rom at "
                     "%s", vdev->vbasedev.name);
        error_printf("Device option ROM contents are probably invalid "
                     "(check dmesg).\nSkip option ROM probe with rombar=0, "
                     "or load from file with romfile=\n");
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
                      size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }

    /*
     * Test the ROM signature against our device, if the vendor is correct
     * but the device ID doesn't match, store the correct device ID and
     * recompute the checksum. Intel IGD devices need this and are known
     * to have bogus checksums so we can't simply adjust the checksum.
     */
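    /*
     * (A well-formed option ROM checksums to zero: the sum of all of its
     * bytes mod 256 must be 0. The code below zeroes the reserved header
     * byte at offset 6 and then stores the negated sum there to restore
     * that invariant after patching the device ID.)
     */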
    if (pci_get_word(vdev->rom) == 0xaa55 &&
        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
        uint16_t vid, did;

        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);

        if (vid == vdev->vendor_id && did != vdev->device_id) {
            int i;
            uint8_t csum, *data = vdev->rom;

            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
                         vdev->device_id);
            data[6] = 0;

            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
                csum += data[i];
            }

            data[6] = -csum;
        }
    }
}

static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } val;
    uint64_t data = 0;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    switch (size) {
    case 1:
        data = val.byte;
        break;
    case 2:
        data = le16_to_cpu(val.word);
        break;
    case 4:
        data = le32_to_cpu(val.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);

    return data;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    DeviceState *dev = DEVICE(vdev);
    char *name;
    int fd = vdev->vbasedev.fd;

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        /* Since pci handles romfile, just print a message and return */
        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
            error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified romfile\n",
                         vdev->vbasedev.name);
        }
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device. The contents
     * will get filled in later when the guest tries to read it.
     */
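    /*
     * This is the standard PCI BAR sizing probe: save the original value,
     * write all 1s to the address bits, read back the resulting mask, then
     * restore the original. For example, a read-back that masks to
     * 0xfffe0000 yields ~0xfffe0000 + 1 = 0x20000, i.e. a 128KB ROM BAR.
     */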
    if (pread(fd, &orig, 4, offset) != 4 ||
        pwrite(fd, &size, 4, offset) != 4 ||
        pread(fd, &size, 4, offset) != 4 ||
        pwrite(fd, &orig, 4, offset) != 4) {
        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    if (vfio_blacklist_opt_rom(vdev)) {
        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
            error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified non zero value for rombar\n",
                         vdev->vbasedev.name);
        } else {
            error_printf("Warning : Rom loading for device at %s has been disabled due to system instability issues. Specify rombar=1 or romfile to force\n",
                         vdev->vbasedev.name);
            return;
        }
    }

    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);

    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);
    g_free(name);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
    vdev->rom_read_failed = false;
}

void vfio_vga_write(void *opaque, hwaddr addr,
                    uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    trace_vfio_vga_write(region->offset + addr, data, size);
}

uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes", size);
        break;
    }

    trace_vfio_vga_read(region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to page
 * size if the BAR sits in an exclusive page on the host, so that we can map
 * this BAR to the guest. But this sub-page BAR may not occupy an exclusive
 * page in the guest, so we should set the priority of the expanded memory
 * region to zero in case it overlaps BARs which share the same page with
 * the sub-page BAR in the guest. Besides, we should also recover the size
 * of this sub-page BAR when its base address is changed in the guest and
 * is no longer page aligned.
 */
static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    VFIORegion *region = &vdev->bars[bar].region;
    MemoryRegion *mmap_mr, *mr;
    PCIIORegion *r;
    pcibus_t bar_addr;
    uint64_t size = region->size;

    /* Make sure that the whole region is allowed to be mmapped */
    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
        region->mmaps[0].size != region->size) {
        return;
    }

    r = &pdev->io_regions[bar];
    bar_addr = r->addr;
    mr = region->mem;
    mmap_mr = &region->mmaps[0].mem;

    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
    if (bar_addr != PCI_BAR_UNMAPPED &&
        !(bar_addr & ~qemu_real_host_page_mask)) {
        size = qemu_real_host_page_size;
    }

    memory_region_transaction_begin();

    memory_region_set_size(mr, size);
    memory_region_set_size(mmap_mr, size);
    if (size != region->size && memory_region_is_mapped(mr)) {
        memory_region_del_subregion(r->address_space, mr);
        memory_region_add_subregion_overlap(r->address_space,
                                            bar_addr, mr, 0);
    }

    memory_region_transaction_commit();
}

/*
 * PCI config space
 */
uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->vbasedev.fd, &phys_val, len,
                    vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->vbasedev.name, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);

    return val;
}

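/*
 * Worked example of the emulated/physical merge in vfio_pci_read_config()
 * above: a 4-byte read at an offset where only the second byte is emulated
 * gives emu_bits == 0x0000ff00, so bits 15:8 of the result come from QEMU's
 * emulated config space and everything else from a pread() of the physical
 * device's config space.
 */
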
void vfio_pci_write_config(PCIDevice *pdev,
                           uint32_t addr, uint32_t val, int len)
{
    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
        != len) {
        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->vbasedev.name, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_msi_enable(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_msi_disable(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_msix_enable(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_msix_disable(vdev);
        }
    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
        range_covers_byte(addr, len, PCI_COMMAND)) {
        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
        int bar;

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            old_addr[bar] = pdev->io_regions[bar].addr;
        }

        pci_default_write_config(pdev, addr, val, len);

        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
            if (old_addr[bar] != pdev->io_regions[bar].addr &&
                pdev->io_regions[bar].size > 0 &&
                pdev->io_regions[bar].size < qemu_real_host_page_size) {
                vfio_sub_page_bar_update_mapping(pdev, bar);
            }
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}

/*
 * Interrupt setup
 */
static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
{
    /*
     * More complicated than it looks. Disabling MSI/X transitions the
     * device to INTx mode (if supported). Therefore we need to first
     * disable MSI/X and then cleanup by disabling INTx.
     */
    if (vdev->interrupt == VFIO_INT_MSIX) {
        vfio_msix_disable(vdev);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        vfio_msi_disable(vdev);
    }

    if (vdev->interrupt == VFIO_INT_INTx) {
        vfio_intx_disable(vdev);
    }
}

static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    uint16_t ctrl;
    bool msi_64bit, msi_maskbit;
    int ret, entries;
    Error *err = NULL;

    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
        return -errno;
    }
    ctrl = le16_to_cpu(ctrl);

    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
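    /*
     * Multiple Message Capable is a power of two encoded in bits 3:1 of the
     * message control word; e.g. ctrl = 0x0086 (64BIT set, QMASK field 0b011)
     * advertises 1 << 3 = 8 vectors.
     */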

    trace_vfio_msi_setup(vdev->vbasedev.name, pos);

    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_prepend(&err, "msi_init failed: ");
        error_propagate(errp, err);
        return ret;
    }
    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);

    return 0;
}

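/*
 * Worked example for the fixup below: a 16KB BAR with a 64-entry MSI-X
 * table at offset 0x2000 (64 * PCI_MSIX_ENTRY_SIZE = 0x400, so the table
 * ends at page-aligned 0x3000 on a 4KB-page host) is split into two mmaps,
 * [0x0000, 0x2000) and [0x3000, 0x4000), leaving only the table itself
 * trapped.
 */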
static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
{
    off_t start, end;
    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;

    /*
     * We expect to find a single mmap covering the whole BAR, anything else
     * means it's either unsupported or already set up.
     */
    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
        region->size != region->mmaps[0].size) {
        return;
    }

    /* MSI-X table start and end aligned to host page size */
    start = vdev->msix->table_offset & qemu_real_host_page_mask;
    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));

    /*
     * Does the MSI-X table cover the beginning of the BAR? The whole BAR?
     * NB - Host page size is necessarily a power of two and so is the PCI
     * BAR (not counting EA yet), therefore if we have host page aligned
     * @start and @end, then any remainder of the BAR before or after those
     * must be at least host page sized and therefore mmap'able.
     */
    if (!start) {
        if (end >= region->size) {
            region->nr_mmaps = 0;
            g_free(region->mmaps);
            region->mmaps = NULL;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar, 0, 0);
        } else {
            region->mmaps[0].offset = end;
            region->mmaps[0].size = region->size - end;
            trace_vfio_msix_fixup(vdev->vbasedev.name,
                                  vdev->msix->table_bar,
                                  region->mmaps[0].offset,
                                  region->mmaps[0].offset +
                                  region->mmaps[0].size);
        }

    /* Maybe it's aligned at the end of the BAR */
    } else if (end >= region->size) {
        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

    /* Otherwise it must split the BAR */
    } else {
        region->nr_mmaps = 2;
        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);

        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));

        region->mmaps[0].size = start;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[0].offset,
                              region->mmaps[0].offset + region->mmaps[0].size);

        region->mmaps[1].offset = end;
        region->mmaps[1].size = region->size - end;
        trace_vfio_msix_fixup(vdev->vbasedev.name,
                              vdev->msix->table_bar, region->mmaps[1].offset,
                              region->mmaps[1].offset + region->mmaps[1].size);
    }
}

/*
 * We don't have any control over how pci_add_capability() inserts
 * capabilities into the chain. In order to set up MSI-X we need a
 * MemoryRegion for the BAR. In order to set up the BAR and not
 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
 * need to first look for where the MSI-X table lives. So we
 * unfortunately split MSI-X setup across two functions.
 */
static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
{
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;
    int fd = vdev->vbasedev.fd;
    VFIOMSIXInfo *msix;

    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return;
    }

    if (pread(fd, &ctrl, sizeof(ctrl),
              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
        return;
    }

    if (pread(fd, &table, sizeof(table),
              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
        return;
    }

    if (pread(fd, &pba, sizeof(pba),
              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
        error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
        return;
    }

    ctrl = le16_to_cpu(ctrl);
    table = le32_to_cpu(table);
    pba = le32_to_cpu(pba);

    msix = g_malloc0(sizeof(*msix));
    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    /*
     * Test the size of the pba_offset variable and catch if it extends
     * outside of the specified BAR. If it does, we need to apply a hardware
     * specific quirk if the device is known, or we have a broken
     * configuration.
     */
    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
        /*
         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
         * adapters. The T5 hardware returns an incorrect value of 0x8000 for
         * the VF PBA offset while the BAR itself is only 8k. The correct value
         * is 0x1000, so we hard code that here.
         */
        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
            (vdev->device_id & 0xff00) == 0x5800) {
            msix->pba_offset = 0x1000;
        } else {
            error_setg(errp, "hardware reports invalid configuration, "
                       "MSIX PBA outside of specified BAR");
            g_free(msix);
            return;
        }
    }

    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
                                msix->table_offset, msix->entries);
    vdev->msix = msix;

    vfio_pci_fixup_msix_region(vdev);
}

static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    int ret;
    Error *err = NULL;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].region.mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
                    &err);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            error_report_err(err);
            return 0;
        }

        error_propagate(errp, err);
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device. If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it. The solution for this is to
     * disable the PBA MemoryRegion unless it's being used. We disable it
     * here and only enable it if a masked vector fires through QEMU. As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    return 0;
}

static void vfio_teardown_msi(VFIOPCIDevice *vdev)
{
    msi_uninit(&vdev->pdev);

    if (vdev->msix) {
        msix_uninit(&vdev->pdev,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->bars[vdev->msix->pba_bar].region.mem);
        g_free(vdev->msix->pending);
    }
}

/*
 * Resource setup
 */
static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
    }
}

static void vfio_bar_setup(VFIOPCIDevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    uint32_t pci_bar;
    uint8_t type;
    int ret;

    /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
    if (!bar->region.size) {
        return;
    }

    /* Determine what type of BAR this is for registration */
    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
    if (ret != sizeof(pci_bar)) {
        error_report("vfio: Failed to read BAR %d (%m)", nr);
        return;
    }

    pci_bar = le32_to_cpu(pci_bar);
    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
    type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
                                    ~PCI_BASE_ADDRESS_MEM_MASK);

    if (vfio_region_mmap(&bar->region)) {
        error_report("Failed to mmap %s BAR %d. Performance may be slow",
                     vdev->vbasedev.name, nr);
    }

    pci_register_bar(&vdev->pdev, nr, type, bar->region.mem);
}

static void vfio_bars_setup(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_setup(vdev, i);
    }
}

static void vfio_bars_exit(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_exit(vdev, i);
        vfio_region_exit(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        pci_unregister_vga(&vdev->pdev);
        vfio_vga_quirk_exit(vdev);
    }
}

static void vfio_bars_finalize(VFIOPCIDevice *vdev)
{
    int i;

    for (i = 0; i < PCI_ROM_SLOT; i++) {
        vfio_bar_quirk_finalize(vdev, i);
        vfio_region_finalize(&vdev->bars[i].region);
    }

    if (vdev->vga) {
        vfio_vga_quirk_finalize(vdev);
        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
            object_unparent(OBJECT(&vdev->vga->region[i].mem));
        }
        g_free(vdev->vga);
    }
}

/*
 * General setup
 */
static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
{
    uint8_t tmp;
    uint16_t next = PCI_CONFIG_SPACE_SIZE;

    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
{
    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;

    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
         tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
        if (tmp > pos && tmp < next) {
            next = tmp;
        }
    }

    return next - pos;
}

static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
}

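/*
 * Each emulated config field is tracked in three parallel byte arrays (see
 * vfio_pci_read_config() above): pdev.config holds the value the guest
 * sees, pdev.wmask marks which bits the guest may write, and
 * emulated_config_bits marks which bits are served from QEMU instead of
 * the physical device. The vfio_add_emulated_word/long() helpers below
 * update all three at once.
 */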
static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
                                   uint16_t val, uint16_t mask)
{
    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
}

static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
{
    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
}

static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
                                   uint32_t val, uint32_t mask)
{
    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
}
1635
7ef165b9
EA
1636static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
1637 Error **errp)
96adc5c7
AW
1638{
1639 uint16_t flags;
1640 uint8_t type;
1641
1642 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
1643 type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
1644
1645 if (type != PCI_EXP_TYPE_ENDPOINT &&
1646 type != PCI_EXP_TYPE_LEG_END &&
1647 type != PCI_EXP_TYPE_RC_END) {
1648
7ef165b9
EA
1649 error_setg(errp, "assignment of PCIe type 0x%x "
1650 "devices is not currently supported", type);
96adc5c7
AW
1651 return -EINVAL;
1652 }
1653
1654 if (!pci_bus_is_express(vdev->pdev.bus)) {
0282abf0
AW
1655 PCIBus *bus = vdev->pdev.bus;
1656 PCIDevice *bridge;
1657
96adc5c7 1658 /*
0282abf0
AW
1659 * Traditionally PCI device assignment exposes the PCIe capability
1660 * as-is on non-express buses. The reason being that some drivers
1661 * simply assume that it's there, for example tg3. However when
1662 * we're running on a native PCIe machine type, like Q35, we need
1663 * to hide the PCIe capability. The reason for this is twofold;
1664 * first Windows guests get a Code 10 error when the PCIe capability
1665 * is exposed in this configuration. Therefore express devices won't
1666 * work at all unless they're attached to express buses in the VM.
1667 * Second, a native PCIe machine introduces the possibility of fine
1668 * granularity IOMMUs supporting both translation and isolation.
1669 * Guest code to discover the IOMMU visibility of a device, such as
1670 * IOMMU grouping code on Linux, is very aware of device types and
1671 * valid transitions between bus types. An express device on a non-
1672 * express bus is not a valid combination on bare metal systems.
1673 *
1674 * Drivers that require a PCIe capability to make the device
1675 * functional are simply going to need to have their devices placed
1676 * on a PCIe bus in the VM.
96adc5c7 1677 */
0282abf0
AW
1678 while (!pci_bus_is_root(bus)) {
1679 bridge = pci_bridge_get_device(bus);
1680 bus = bridge->bus;
1681 }
1682
1683 if (pci_bus_is_express(bus)) {
1684 return 0;
1685 }
1686
96adc5c7
AW
1687 } else if (pci_bus_is_root(vdev->pdev.bus)) {
1688 /*
1689 * On a Root Complex bus Endpoints become Root Complex Integrated
1690 * Endpoints, which changes the type and clears the LNK & LNK2 fields.
1691 */
1692 if (type == PCI_EXP_TYPE_ENDPOINT) {
1693 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1694 PCI_EXP_TYPE_RC_END << 4,
1695 PCI_EXP_FLAGS_TYPE);
1696
1697 /* Link Capabilities, Status, and Control go away */
1698 if (size > PCI_EXP_LNKCTL) {
1699 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
1700 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1701 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
1702
1703#ifndef PCI_EXP_LNKCAP2
1704#define PCI_EXP_LNKCAP2 44
1705#endif
1706#ifndef PCI_EXP_LNKSTA2
1707#define PCI_EXP_LNKSTA2 50
1708#endif
1709 /* Link 2 Capabilities, Status, and Control go away */
1710 if (size > PCI_EXP_LNKCAP2) {
1711 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
1712 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
1713 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
1714 }
1715 }
1716
1717 } else if (type == PCI_EXP_TYPE_LEG_END) {
1718 /*
1719 * Legacy endpoints don't belong on the root complex. Windows
1720 * seems to be happier with devices if we skip the capability.
1721 */
1722 return 0;
1723 }
1724
1725 } else {
1726 /*
1727 * Convert Root Complex Integrated Endpoints to regular endpoints.
1728 * These devices don't support LNK/LNK2 capabilities, so make them up.
1729 */
1730 if (type == PCI_EXP_TYPE_RC_END) {
1731 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1732 PCI_EXP_TYPE_ENDPOINT << 4,
1733 PCI_EXP_FLAGS_TYPE);
1734 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
1735 PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
1736 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1737 }
1738
1739 /* Mark the Link Status bits as emulated to allow virtual negotiation */
1740 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
1741 pci_get_word(vdev->pdev.config + pos +
1742 PCI_EXP_LNKSTA),
1743 PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
1744 }
1745
9a7c2a59
MZ
1746 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
1747 errp);
1748 if (pos < 0) {
1749 return pos;
96adc5c7
AW
1750 }
1751
9a7c2a59
MZ
1752 vdev->pdev.exp.exp_cap = pos;
1753
96adc5c7
AW
1754 return pos;
1755}
1756
9ee27d73 1757static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
befe5176
AW
1758{
1759 uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
1760
1761 if (cap & PCI_EXP_DEVCAP_FLR) {
df92ee44 1762 trace_vfio_check_pcie_flr(vdev->vbasedev.name);
befe5176
AW
1763 vdev->has_flr = true;
1764 }
1765}
1766
9ee27d73 1767static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
befe5176
AW
1768{
1769 uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
1770
1771 if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
df92ee44 1772 trace_vfio_check_pm_reset(vdev->vbasedev.name);
befe5176
AW
1773 vdev->has_pm_reset = true;
1774 }
1775}
1776
9ee27d73 1777static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
befe5176
AW
1778{
1779 uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
1780
1781 if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
df92ee44 1782 trace_vfio_check_af_flr(vdev->vbasedev.name);
befe5176
AW
1783 vdev->has_flr = true;
1784 }
1785}
1786
7ef165b9 1787static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
65501a74
AW
1788{
1789 PCIDevice *pdev = &vdev->pdev;
1790 uint8_t cap_id, next, size;
1791 int ret;
1792
1793 cap_id = pdev->config[pos];
3fc1c182 1794 next = pdev->config[pos + PCI_CAP_LIST_NEXT];
65501a74
AW
1795
1796 /*
1797 * If it becomes important to configure capabilities to their actual
1798 * size, use this as the default when it's something we don't recognize.
1799 * Since QEMU doesn't actually handle many of the config accesses,
1800 * exact size doesn't seem worthwhile.
1801 */
1802 size = vfio_std_cap_max_size(pdev, pos);
1803
1804 /*
1805 * pci_add_capability always inserts the new capability at the head
1806 * of the chain. Therefore to end up with a chain that matches the
1807 * physical device, we insert from the end by making this recursive.
3fc1c182 1808 * This is also why we pre-calculate size above, as the cached config space
65501a74
AW
1809 * will be changed as we unwind the stack.
1810 */
1811 if (next) {
7ef165b9 1812 ret = vfio_add_std_cap(vdev, next, errp);
65501a74 1813 if (ret) {
7ef165b9 1814 goto out;
65501a74
AW
1815 }
1816 } else {
96adc5c7
AW
1817 /* Begin the rebuild, use QEMU emulated list bits */
1818 pdev->config[PCI_CAPABILITY_LIST] = 0;
1819 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
1820 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
65501a74
AW
1821 }
1822
96adc5c7 1823 /* Use emulated next pointer to allow dropping caps */
3fc1c182 1824 pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);
96adc5c7 1825
65501a74
AW
1826 switch (cap_id) {
1827 case PCI_CAP_ID_MSI:
7ef165b9 1828 ret = vfio_msi_setup(vdev, pos, errp);
65501a74 1829 break;
96adc5c7 1830 case PCI_CAP_ID_EXP:
befe5176 1831 vfio_check_pcie_flr(vdev, pos);
7ef165b9 1832 ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
96adc5c7 1833 break;
65501a74 1834 case PCI_CAP_ID_MSIX:
7ef165b9 1835 ret = vfio_msix_setup(vdev, pos, errp);
65501a74 1836 break;
ba661818 1837 case PCI_CAP_ID_PM:
befe5176 1838 vfio_check_pm_reset(vdev, pos);
ba661818 1839 vdev->pm_cap = pos;
27841278 1840 ret = pci_add_capability(pdev, cap_id, pos, size, errp);
befe5176
AW
1841 break;
1842 case PCI_CAP_ID_AF:
1843 vfio_check_af_flr(vdev, pos);
27841278 1844 ret = pci_add_capability(pdev, cap_id, pos, size, errp);
befe5176 1845 break;
65501a74 1846 default:
27841278 1847 ret = pci_add_capability(pdev, cap_id, pos, size, errp);
65501a74
AW
1848 break;
1849 }
7ef165b9 1850out:
65501a74 1851 if (ret < 0) {
7ef165b9
EA
1852 error_prepend(errp,
1853 "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
1854 cap_id, size, pos);
65501a74
AW
1855 return ret;
1856 }
1857
1858 return 0;
1859}
1860
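/*
 * Toy model (illustrative only) of the recursion argument in the comment
 * above: with a prepend-only insert such as pci_add_capability(), adding
 * the tail capability first reproduces the physical ordering. The names
 * below are invented for the example.
 */
struct toy_cap {
    int id;
    struct toy_cap *next;
};

static struct toy_cap *toy_prepend(struct toy_cap *head, struct toy_cap *cap)
{
    cap->next = head; /* the newest insertion always becomes the head */
    return cap;
}

/* toy_prepend(toy_prepend(toy_prepend(NULL, &c), &b), &a) yields a->b->c */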
7ef165b9 1861static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
325ae8d5
CF
1862{
1863 PCIDevice *pdev = &vdev->pdev;
1864 uint32_t header;
1865 uint16_t cap_id, next, size;
1866 uint8_t cap_ver;
1867 uint8_t *config;
1868
e37dac06
AW
1869 /* Only add extended caps if we have them and the guest can see them */
1870 if (!pci_is_express(pdev) || !pci_bus_is_express(pdev->bus) ||
1871 !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
7ef165b9 1872 return;
e37dac06
AW
1873 }
1874
325ae8d5
CF
1875 /*
1876 * pcie_add_capability always inserts the new capability at the tail
1877 * of the chain. Therefore to end up with a chain that matches the
1878 * physical device, we cache the config space to avoid overwriting
1879 * the original config space when we parse the extended capabilities.
1880 */
1881 config = g_memdup(pdev->config, vdev->config_size);
1882
e37dac06
AW
1883 /*
1884 * Extended capabilities are chained with each pointing to the next, so we
1885 * can drop anything other than the head of the chain simply by modifying
d0d1cd70
AW
1886 * the previous next pointer. Seed the head of the chain here such that
1887 * we can simply skip any capabilities we want to drop below, regardless
1888 * of their position in the chain. If this stub capability still exists
1889 * after we add the capabilities we want to expose, update the capability
1890 * ID to zero. Note that we cannot seed with the capability header being
1891 * zero, as this conflicts with the definition of an absent capability chain
1892 * and prevents capabilities beyond the head of the list from being added.
1893 * By replacing the dummy capability ID with zero after walking the device
1894 * chain, we also transparently mark extended capabilities as absent if
1895 * no capabilities were added. Note that the PCIe spec defines an absence
1896 * of extended capabilities to be determined by a value of zero for the
1897 * capability ID, version, AND next pointer. A non-zero next pointer
1898 * should be sufficient to indicate additional capabilities are present,
1899 * which will occur if we call pcie_add_capability() below. The entire
1900 * first dword is emulated to support this.
1901 *
1902 * NB. The kernel side does similar masking, so be prepared that our
1903 * view of the device may also contain a capability ID zero in the head
1904 * of the chain. Skip it for the same reason that we cannot seed the
1905 * chain with a zero capability.
e37dac06
AW
1906 */
1907 pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
1908 PCI_EXT_CAP(0xFFFF, 0, 0));
1909 pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
1910 pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
1911
325ae8d5
CF
1912 for (next = PCI_CONFIG_SPACE_SIZE; next;
1913 next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
1914 header = pci_get_long(config + next);
1915 cap_id = PCI_EXT_CAP_ID(header);
1916 cap_ver = PCI_EXT_CAP_VER(header);
1917
1918 /*
1919 * If it becomes important to configure extended capabilities to their
1920 * actual size, use this as the default when it's something we don't
1921 * recognize. Since QEMU doesn't actually handle many of the config
1922 * accesses, exact size doesn't seem worthwhile.
1923 */
1924 size = vfio_ext_cap_max_size(config, next);
1925
325ae8d5
CF
1926 /* Use emulated next pointer to allow dropping extended caps */
1927 pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
1928 PCI_EXT_CAP_NEXT_MASK);
e37dac06
AW
1929
1930 switch (cap_id) {
d0d1cd70 1931 case 0: /* kernel masked capability */
e37dac06 1932 case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
383a7af7 1933 case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
e37dac06
AW
1934 trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
1935 break;
1936 default:
1937 pcie_add_capability(pdev, cap_id, cap_ver, next, size);
1938 }
1939
1940 }
1941
1942 /* Cleanup chain head ID if necessary */
1943 if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
1944 pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
325ae8d5
CF
1945 }
1946
1947 g_free(config);
7ef165b9 1948 return;
325ae8d5
CF
1949}
1950
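/*
 * The 0xFFFF placeholder seeded above is just a packed extended-capability
 * header. A sketch of the dword layout (mirroring what the PCI_EXT_CAP()
 * helper used above produces; the function name is illustrative):
 */
static uint32_t ext_cap_header(uint16_t id, uint8_t ver, uint16_t next)
{
    /* ID in bits 15:0, version in 19:16, next pointer in 31:20 */
    return (uint32_t)id |
           (((uint32_t)ver & 0xf) << 16) |
           (((uint32_t)next & 0xffc) << 20);
}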
7ef165b9 1951static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
65501a74
AW
1952{
1953 PCIDevice *pdev = &vdev->pdev;
325ae8d5 1954 int ret;
65501a74
AW
1955
1956 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
1957 !pdev->config[PCI_CAPABILITY_LIST]) {
1958 return 0; /* Nothing to add */
1959 }
1960
7ef165b9 1961 ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
325ae8d5
CF
1962 if (ret) {
1963 return ret;
1964 }
1965
7ef165b9
EA
1966 vfio_add_ext_cap(vdev);
1967 return 0;
65501a74
AW
1968}
1969
9ee27d73 1970static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
f16f39c3
AW
1971{
1972 PCIDevice *pdev = &vdev->pdev;
1973 uint16_t cmd;
1974
1975 vfio_disable_interrupts(vdev);
1976
1977 /* Make sure the device is in D0 */
1978 if (vdev->pm_cap) {
1979 uint16_t pmcsr;
1980 uint8_t state;
1981
1982 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
1983 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1984 if (state) {
1985 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1986 vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
1987 /* vfio handles the necessary delay here */
1988 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
1989 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1990 if (state) {
4e505ddd 1991 error_report("vfio: Unable to power on device, stuck in D%d",
f16f39c3
AW
1992 state);
1993 }
1994 }
1995 }
1996
1997 /*
1998 * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
1999 * Also put INTx Disable in known state.
2000 */
2001 cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
2002 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
2003 PCI_COMMAND_INTX_DISABLE);
2004 vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
2005}
2006
9ee27d73 2007static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
f16f39c3 2008{
7dfb3424 2009 Error *err = NULL;
a52a4c47 2010 int nr;
7dfb3424
EA
2011
2012 vfio_intx_enable(vdev, &err);
2013 if (err) {
2014 error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
2015 }
a52a4c47
IY
2016
2017 for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
2018 off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
2019 uint32_t val = 0;
2020 uint32_t len = sizeof(val);
2021
2022 if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
2023 error_report("%s(%s) reset bar %d failed: %m", __func__,
2024 vdev->vbasedev.name, nr);
2025 }
2026 }
f16f39c3
AW
2027}
2028
7df9381b 2029static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
f16f39c3 2030{
7df9381b
AW
2031 char tmp[13];
2032
2033 sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
2034 addr->bus, addr->slot, addr->function);
2035
2036 return (strcmp(tmp, name) == 0);
f16f39c3
AW
2037}
2038
9ee27d73 2039static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
f16f39c3
AW
2040{
2041 VFIOGroup *group;
2042 struct vfio_pci_hot_reset_info *info;
2043 struct vfio_pci_dependent_device *devices;
2044 struct vfio_pci_hot_reset *reset;
2045 int32_t *fds;
2046 int ret, i, count;
2047 bool multi = false;
2048
df92ee44 2049 trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
f16f39c3 2050
893bfc3c
C
2051 if (!single) {
2052 vfio_pci_pre_reset(vdev);
2053 }
b47d8efa 2054 vdev->vbasedev.needs_reset = false;
f16f39c3
AW
2055
2056 info = g_malloc0(sizeof(*info));
2057 info->argsz = sizeof(*info);
2058
5546a621 2059 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
f16f39c3
AW
2060 if (ret && errno != ENOSPC) {
2061 ret = -errno;
2062 if (!vdev->has_pm_reset) {
7df9381b
AW
2063 error_report("vfio: Cannot reset device %s, "
2064 "no available reset mechanism.", vdev->vbasedev.name);
f16f39c3
AW
2065 }
2066 goto out_single;
2067 }
2068
2069 count = info->count;
2070 info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
2071 info->argsz = sizeof(*info) + (count * sizeof(*devices));
2072 devices = &info->devices[0];
2073
5546a621 2074 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
f16f39c3
AW
2075 if (ret) {
2076 ret = -errno;
2077 error_report("vfio: hot reset info failed: %m");
2078 goto out_single;
2079 }
2080
df92ee44 2081 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
f16f39c3
AW
2082
2083 /* Verify that we have all the groups required */
2084 for (i = 0; i < info->count; i++) {
2085 PCIHostDeviceAddress host;
9ee27d73 2086 VFIOPCIDevice *tmp;
b47d8efa 2087 VFIODevice *vbasedev_iter;
f16f39c3
AW
2088
2089 host.domain = devices[i].segment;
2090 host.bus = devices[i].bus;
2091 host.slot = PCI_SLOT(devices[i].devfn);
2092 host.function = PCI_FUNC(devices[i].devfn);
2093
385f57cf 2094 trace_vfio_pci_hot_reset_dep_devices(host.domain,
f16f39c3
AW
2095 host.bus, host.slot, host.function, devices[i].group_id);
2096
7df9381b 2097 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
f16f39c3
AW
2098 continue;
2099 }
2100
62356b72 2101 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2102 if (group->groupid == devices[i].group_id) {
2103 break;
2104 }
2105 }
2106
2107 if (!group) {
2108 if (!vdev->has_pm_reset) {
df92ee44 2109 error_report("vfio: Cannot reset device %s, "
f16f39c3 2110 "depends on group %d which is not owned.",
df92ee44 2111 vdev->vbasedev.name, devices[i].group_id);
f16f39c3
AW
2112 }
2113 ret = -EPERM;
2114 goto out;
2115 }
2116
2117 /* Prep dependent devices for reset and clear our marker. */
b47d8efa
EA
2118 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2119 if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
2120 continue;
2121 }
2122 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
7df9381b 2123 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
f16f39c3 2124 if (single) {
f16f39c3
AW
2125 ret = -EINVAL;
2126 goto out_single;
2127 }
2128 vfio_pci_pre_reset(tmp);
b47d8efa 2129 tmp->vbasedev.needs_reset = false;
f16f39c3
AW
2130 multi = true;
2131 break;
2132 }
2133 }
2134 }
2135
2136 if (!single && !multi) {
f16f39c3
AW
2137 ret = -EINVAL;
2138 goto out_single;
2139 }
2140
2141 /* Determine how many group fds need to be passed */
2142 count = 0;
62356b72 2143 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2144 for (i = 0; i < info->count; i++) {
2145 if (group->groupid == devices[i].group_id) {
2146 count++;
2147 break;
2148 }
2149 }
2150 }
2151
2152 reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
2153 reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
2154 fds = &reset->group_fds[0];
2155
2156 /* Fill in group fds */
62356b72 2157 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2158 for (i = 0; i < info->count; i++) {
2159 if (group->groupid == devices[i].group_id) {
2160 fds[reset->count++] = group->fd;
2161 break;
2162 }
2163 }
2164 }
2165
2166 /* Bus reset! */
5546a621 2167 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
f16f39c3
AW
2168 g_free(reset);
2169
df92ee44 2170 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
385f57cf 2171 ret ? "%m" : "Success");
f16f39c3
AW
2172
2173out:
2174 /* Re-enable INTx on affected devices */
2175 for (i = 0; i < info->count; i++) {
2176 PCIHostDeviceAddress host;
9ee27d73 2177 VFIOPCIDevice *tmp;
b47d8efa 2178 VFIODevice *vbasedev_iter;
f16f39c3
AW
2179
2180 host.domain = devices[i].segment;
2181 host.bus = devices[i].bus;
2182 host.slot = PCI_SLOT(devices[i].devfn);
2183 host.function = PCI_FUNC(devices[i].devfn);
2184
7df9381b 2185 if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
f16f39c3
AW
2186 continue;
2187 }
2188
62356b72 2189 QLIST_FOREACH(group, &vfio_group_list, next) {
f16f39c3
AW
2190 if (group->groupid == devices[i].group_id) {
2191 break;
2192 }
2193 }
2194
2195 if (!group) {
2196 break;
2197 }
2198
b47d8efa
EA
2199 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2200 if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
2201 continue;
2202 }
2203 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
7df9381b 2204 if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
f16f39c3
AW
2205 vfio_pci_post_reset(tmp);
2206 break;
2207 }
2208 }
2209 }
2210out_single:
893bfc3c
C
2211 if (!single) {
2212 vfio_pci_post_reset(vdev);
2213 }
f16f39c3
AW
2214 g_free(info);
2215
2216 return ret;
2217}
2218
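/*
 * The function above relies on the common vfio "argsz" idiom for
 * variable-length ioctl data: probe once with the minimal size, let the
 * kernel report the element count, then retry with an enlarged buffer.
 * Condensed sketch (helper name invented, error handling abbreviated):
 */
static struct vfio_pci_hot_reset_info *fetch_hot_reset_info(int device_fd)
{
    struct vfio_pci_hot_reset_info *info = g_malloc0(sizeof(*info));
    size_t devs;

    info->argsz = sizeof(*info);
    if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info) &&
        errno != ENOSPC) {
        g_free(info);
        return NULL; /* no reset info available at all */
    }

    /* the kernel filled info->count; size the buffer for the real list */
    devs = info->count * sizeof(struct vfio_pci_dependent_device);
    info = g_realloc(info, sizeof(*info) + devs);
    info->argsz = sizeof(*info) + devs;

    if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
        g_free(info);
        return NULL;
    }
    return info;
}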
2219/*
2220 * We want to differentiate hot reset of multiple in-use devices vs. hot
2221 * reset of a single in-use device. VFIO_DEVICE_RESET already handles the
2222 * case of doing hot resets when there is only a single device per bus.
2223 * "In-use" refers to how many VFIODevices are affected. A hot reset that affects
2224 * multiple devices, but only a single in-use device, means that we can call
2225 * it from our bus ->reset() callback since the extent is effectively a single
2226 * device. This allows us to make use of it in the hotplug path. When there
2227 * are multiple in-use devices, we can only trigger the hot reset during a
2228 * system reset and thus from our reset handler. We separate _one vs _multi
2229 * here so that we don't overlap and do a double reset on the system reset
2230 * path where both our reset handler and ->reset() callback are used. Calling
2231 * _one() will only do a hot reset for the one in-use devices case, calling
2232 * _multi() will do nothing if a _one() would have been sufficient.
2233 */
9ee27d73 2234static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
f16f39c3
AW
2235{
2236 return vfio_pci_hot_reset(vdev, true);
2237}
2238
b47d8efa 2239static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
f16f39c3 2240{
b47d8efa 2241 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
f16f39c3
AW
2242 return vfio_pci_hot_reset(vdev, false);
2243}
2244
b47d8efa
EA
2245static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
2246{
2247 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2248 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
2249 vbasedev->needs_reset = true;
2250 }
2251}
2252
2253static VFIODeviceOps vfio_pci_ops = {
2254 .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
2255 .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
870cb6f1 2256 .vfio_eoi = vfio_intx_eoi,
b47d8efa
EA
2257};
2258
cde4279b 2259int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
e593c021
AW
2260{
2261 VFIODevice *vbasedev = &vdev->vbasedev;
2262 struct vfio_region_info *reg_info;
2263 int ret;
2264
4225f2b6
AW
2265 ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
2266 if (ret) {
cde4279b
EA
2267 error_setg_errno(errp, -ret,
2268 "failed getting region info for VGA region index %d",
2269 VFIO_PCI_VGA_REGION_INDEX);
4225f2b6
AW
2270 return ret;
2271 }
e593c021 2272
4225f2b6
AW
2273 if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
2274 !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2275 reg_info->size < 0xbffff + 1) {
cde4279b
EA
2276 error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
2277 (unsigned long)reg_info->flags,
2278 (unsigned long)reg_info->size);
4225f2b6
AW
2279 g_free(reg_info);
2280 return -EINVAL;
2281 }
e593c021 2282
4225f2b6 2283 vdev->vga = g_new0(VFIOVGA, 1);
e593c021 2284
4225f2b6
AW
2285 vdev->vga->fd_offset = reg_info->offset;
2286 vdev->vga->fd = vdev->vbasedev.fd;
e593c021 2287
4225f2b6 2288 g_free(reg_info);
e593c021 2289
4225f2b6
AW
2290 vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2291 vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2292 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
e593c021 2293
182bca45
AW
2294 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2295 OBJECT(vdev), &vfio_vga_ops,
2296 &vdev->vga->region[QEMU_PCI_VGA_MEM],
2297 "vfio-vga-mmio@0xa0000",
2298 QEMU_PCI_VGA_MEM_SIZE);
2299
4225f2b6
AW
2300 vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2301 vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2302 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
e593c021 2303
182bca45
AW
2304 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2305 OBJECT(vdev), &vfio_vga_ops,
2306 &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
2307 "vfio-vga-io@0x3b0",
2308 QEMU_PCI_VGA_IO_LO_SIZE);
2309
4225f2b6
AW
2310 vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2311 vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2312 QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
e593c021 2313
182bca45
AW
2314 memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
2315 OBJECT(vdev), &vfio_vga_ops,
2316 &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
2317 "vfio-vga-io@0x3c0",
2318 QEMU_PCI_VGA_IO_HI_SIZE);
2319
2320 pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2321 &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2322 &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
2323
e593c021
AW
2324 return 0;
2325}
2326
e04cff9d 2327static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
65501a74 2328{
217e9fdc 2329 VFIODevice *vbasedev = &vdev->vbasedev;
46900226 2330 struct vfio_region_info *reg_info;
7b4b0e9e 2331 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
d13dd2d7 2332 int i, ret = -1;
65501a74
AW
2333
2334 /* Sanity check device */
d13dd2d7 2335 if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
2312d907 2336 error_setg(errp, "this isn't a PCI device");
e04cff9d 2337 return;
65501a74
AW
2338 }
2339
d13dd2d7 2340 if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2312d907
EA
2341 error_setg(errp, "unexpected number of io regions %u",
2342 vbasedev->num_regions);
e04cff9d 2343 return;
65501a74
AW
2344 }
2345
d13dd2d7 2346 if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2312d907 2347 error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
e04cff9d 2348 return;
65501a74
AW
2349 }
2350
2351 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
db0da029
AW
2352 char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
2353
2354 ret = vfio_region_setup(OBJECT(vdev), vbasedev,
2355 &vdev->bars[i].region, i, name);
2356 g_free(name);
2357
65501a74 2358 if (ret) {
2312d907 2359 error_setg_errno(errp, -ret, "failed to get region %d info", i);
e04cff9d 2360 return;
65501a74
AW
2361 }
2362
7076eabc 2363 QLIST_INIT(&vdev->bars[i].quirks);
46900226 2364 }
65501a74 2365
46900226
AW
2366 ret = vfio_get_region_info(vbasedev,
2367 VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
65501a74 2368 if (ret) {
2312d907 2369 error_setg_errno(errp, -ret, "failed to get config info");
e04cff9d 2370 return;
65501a74
AW
2371 }
2372
d13dd2d7 2373 trace_vfio_populate_device_config(vdev->vbasedev.name,
46900226
AW
2374 (unsigned long)reg_info->size,
2375 (unsigned long)reg_info->offset,
2376 (unsigned long)reg_info->flags);
65501a74 2377
46900226 2378 vdev->config_size = reg_info->size;
6a659bbf
AW
2379 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2380 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2381 }
46900226
AW
2382 vdev->config_offset = reg_info->offset;
2383
2384 g_free(reg_info);
65501a74 2385
e593c021 2386 if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
2312d907 2387 ret = vfio_populate_vga(vdev, errp);
f15689c7 2388 if (ret) {
2312d907 2389 error_append_hint(errp, "device does not support "
cde4279b 2390 "requested feature x-vga\n");
e04cff9d 2391 return;
f15689c7 2392 }
f15689c7 2393 }
47cbe50c 2394
7b4b0e9e
VMP
2395 irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
2396
5546a621 2397 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
7b4b0e9e
VMP
2398 if (ret) {
2399 /* This can fail for an old kernel or legacy PCI dev */
d13dd2d7 2400 trace_vfio_populate_device_get_irq_info_failure();
7b4b0e9e
VMP
2401 } else if (irq_info.count == 1) {
2402 vdev->pci_aer = true;
2403 } else {
2312d907 2404 error_report(WARN_PREFIX
8fbf47c3 2405 "Could not enable error recovery for the device",
df92ee44 2406 vbasedev->name);
7b4b0e9e 2407 }
d13dd2d7
EA
2408}
2409
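/*
 * Underneath the vfio_get_region_info() helper used above is a single
 * vfio ioctl; a raw, self-contained sketch for the config region
 * (function name illustrative, error handling abbreviated):
 */
static int query_config_region(int device_fd, uint64_t *offset,
                               uint64_t *size)
{
    struct vfio_region_info reg = {
        .argsz = sizeof(reg),
        .index = VFIO_PCI_CONFIG_REGION_INDEX,
    };

    if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg)) {
        return -errno;
    }
    *offset = reg.offset; /* config space's offset within the device fd */
    *size = reg.size;
    return 0;
}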
9ee27d73 2410static void vfio_put_device(VFIOPCIDevice *vdev)
65501a74 2411{
462037c9 2412 g_free(vdev->vbasedev.name);
db0da029
AW
2413 g_free(vdev->msix);
2414
d13dd2d7 2415 vfio_put_base_device(&vdev->vbasedev);
65501a74
AW
2416}
2417
7b4b0e9e
VMP
2418static void vfio_err_notifier_handler(void *opaque)
2419{
9ee27d73 2420 VFIOPCIDevice *vdev = opaque;
7b4b0e9e
VMP
2421
2422 if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2423 return;
2424 }
2425
2426 /*
2427 * TBD. Retrieve the error details and decide what action
2428 * needs to be taken. One of the actions could be to pass
2429 * the error to the guest and have the guest driver recover
2430 * from the error. This requires that PCIe capabilities be
2431 * exposed to the guest. For now, we just terminate the
2432 * guest to contain the error.
2433 */
2434
7df9381b 2435 error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name);
7b4b0e9e 2436
ba29776f 2437 vm_stop(RUN_STATE_INTERNAL_ERROR);
7b4b0e9e
VMP
2438}
2439
2440/*
2441 * Registers error notifier for devices supporting error recovery.
2442 * If we encounter a failure in this function, we report an error
2443 * and continue after disabling error recovery support for the
2444 * device.
2445 */
9ee27d73 2446static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
7b4b0e9e
VMP
2447{
2448 int ret;
2449 int argsz;
2450 struct vfio_irq_set *irq_set;
2451 int32_t *pfd;
2452
2453 if (!vdev->pci_aer) {
2454 return;
2455 }
2456
2457 if (event_notifier_init(&vdev->err_notifier, 0)) {
8fbf47c3 2458 error_report("vfio: Unable to init event notifier for error detection");
7b4b0e9e
VMP
2459 vdev->pci_aer = false;
2460 return;
2461 }
2462
2463 argsz = sizeof(*irq_set) + sizeof(*pfd);
2464
2465 irq_set = g_malloc0(argsz);
2466 irq_set->argsz = argsz;
2467 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2468 VFIO_IRQ_SET_ACTION_TRIGGER;
2469 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2470 irq_set->start = 0;
2471 irq_set->count = 1;
2472 pfd = (int32_t *)&irq_set->data;
2473
2474 *pfd = event_notifier_get_fd(&vdev->err_notifier);
2475 qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);
2476
5546a621 2477 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
7b4b0e9e 2478 if (ret) {
8fbf47c3 2479 error_report("vfio: Failed to set up error notification");
7b4b0e9e
VMP
2480 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2481 event_notifier_cleanup(&vdev->err_notifier);
2482 vdev->pci_aer = false;
2483 }
2484 g_free(irq_set);
2485}
2486
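/*
 * The other half of the eventfd handshake wired up above: once
 * VFIO_DEVICE_SET_IRQS binds an eventfd to an IRQ index, the kernel
 * signals it and userspace drains it with an 8-byte read. QEMU does
 * this via event_notifier_test_and_clear(); the raw equivalent, as a
 * hypothetical standalone helper (assumes <unistd.h>), looks like:
 */
static int drain_irq_eventfd(int efd)
{
    uint64_t count;

    /* read() returns the number of events since the previous read */
    if (read(efd, &count, sizeof(count)) != sizeof(count)) {
        return -1;
    }
    return (int)count;
}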
9ee27d73 2487static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
7b4b0e9e
VMP
2488{
2489 int argsz;
2490 struct vfio_irq_set *irq_set;
2491 int32_t *pfd;
2492 int ret;
2493
2494 if (!vdev->pci_aer) {
2495 return;
2496 }
2497
2498 argsz = sizeof(*irq_set) + sizeof(*pfd);
2499
2500 irq_set = g_malloc0(argsz);
2501 irq_set->argsz = argsz;
2502 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2503 VFIO_IRQ_SET_ACTION_TRIGGER;
2504 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2505 irq_set->start = 0;
2506 irq_set->count = 1;
2507 pfd = (int32_t *)&irq_set->data;
2508 *pfd = -1;
2509
5546a621 2510 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
7b4b0e9e 2511 if (ret) {
8fbf47c3 2512 error_report("vfio: Failed to de-assign error fd: %m");
7b4b0e9e
VMP
2513 }
2514 g_free(irq_set);
2515 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2516 NULL, NULL, vdev);
2517 event_notifier_cleanup(&vdev->err_notifier);
2518}
2519
47cbe50c
AW
2520static void vfio_req_notifier_handler(void *opaque)
2521{
2522 VFIOPCIDevice *vdev = opaque;
35c7cb4c 2523 Error *err = NULL;
47cbe50c
AW
2524
2525 if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2526 return;
2527 }
2528
35c7cb4c
AW
2529 qdev_unplug(&vdev->pdev.qdev, &err);
2530 if (err) {
2531 error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
2532 }
47cbe50c
AW
2533}
2534
2535static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2536{
2537 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
2538 .index = VFIO_PCI_REQ_IRQ_INDEX };
2539 int argsz;
2540 struct vfio_irq_set *irq_set;
2541 int32_t *pfd;
2542
2543 if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2544 return;
2545 }
2546
2547 if (ioctl(vdev->vbasedev.fd,
2548 VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
2549 return;
2550 }
2551
2552 if (event_notifier_init(&vdev->req_notifier, 0)) {
2553 error_report("vfio: Unable to init event notifier for device request");
2554 return;
2555 }
2556
2557 argsz = sizeof(*irq_set) + sizeof(*pfd);
2558
2559 irq_set = g_malloc0(argsz);
2560 irq_set->argsz = argsz;
2561 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2562 VFIO_IRQ_SET_ACTION_TRIGGER;
2563 irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2564 irq_set->start = 0;
2565 irq_set->count = 1;
2566 pfd = (int32_t *)&irq_set->data;
2567
2568 *pfd = event_notifier_get_fd(&vdev->req_notifier);
2569 qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);
2570
2571 if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2572 error_report("vfio: Failed to set up device request notification");
2573 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2574 event_notifier_cleanup(&vdev->req_notifier);
2575 } else {
2576 vdev->req_enabled = true;
2577 }
2578
2579 g_free(irq_set);
2580}
2581
2582static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
2583{
2584 int argsz;
2585 struct vfio_irq_set *irq_set;
2586 int32_t *pfd;
2587
2588 if (!vdev->req_enabled) {
2589 return;
2590 }
2591
2592 argsz = sizeof(*irq_set) + sizeof(*pfd);
2593
2594 irq_set = g_malloc0(argsz);
2595 irq_set->argsz = argsz;
2596 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2597 VFIO_IRQ_SET_ACTION_TRIGGER;
2598 irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2599 irq_set->start = 0;
2600 irq_set->count = 1;
2601 pfd = (int32_t *)&irq_set->data;
2602 *pfd = -1;
2603
2604 if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2605 error_report("vfio: Failed to de-assign device request fd: %m");
2606 }
2607 g_free(irq_set);
2608 qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
2609 NULL, NULL, vdev);
2610 event_notifier_cleanup(&vdev->req_notifier);
2611
2612 vdev->req_enabled = false;
2613}
2614
1a22aca1 2615static void vfio_realize(PCIDevice *pdev, Error **errp)
65501a74 2616{
b47d8efa
EA
2617 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2618 VFIODevice *vbasedev_iter;
65501a74 2619 VFIOGroup *group;
7df9381b 2620 char *tmp, group_path[PATH_MAX], *group_name;
ec3bcf42 2621 Error *err = NULL;
65501a74
AW
2622 ssize_t len;
2623 struct stat st;
2624 int groupid;
581406e0 2625 int i, ret;
65501a74 2626
7df9381b 2627 if (!vdev->vbasedev.sysfsdev) {
4a946268
EA
2628 if (!(~vdev->host.domain || ~vdev->host.bus ||
2629 ~vdev->host.slot || ~vdev->host.function)) {
2630 error_setg(errp, "No provided host device");
6e4e6f0d
DJS
2631 error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
2632 "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
4a946268
EA
2633 return;
2634 }
7df9381b
AW
2635 vdev->vbasedev.sysfsdev =
2636 g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
2637 vdev->host.domain, vdev->host.bus,
2638 vdev->host.slot, vdev->host.function);
2639 }
2640
2641 if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
1a22aca1
EA
2642 error_setg_errno(errp, errno, "no such host device");
2643 error_prepend(errp, ERR_PREFIX, vdev->vbasedev.sysfsdev);
2644 return;
65501a74
AW
2645 }
2646
7df9381b 2647 vdev->vbasedev.name = g_strdup(basename(vdev->vbasedev.sysfsdev));
b47d8efa 2648 vdev->vbasedev.ops = &vfio_pci_ops;
462037c9 2649 vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
462037c9 2650
7df9381b
AW
2651 tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
2652 len = readlink(tmp, group_path, sizeof(group_path));
2653 g_free(tmp);
65501a74 2654
7df9381b 2655 if (len <= 0 || len >= sizeof(group_path)) {
1a22aca1
EA
2656 error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
2657 "no iommu_group found");
426ec904 2658 goto error;
65501a74
AW
2659 }
2660
7df9381b 2661 group_path[len] = 0;
65501a74 2662
7df9381b 2663 group_name = basename(group_path);
65501a74 2664 if (sscanf(group_name, "%d", &groupid) != 1) {
1a22aca1 2665 error_setg_errno(errp, errno, "failed to read %s", group_path);
426ec904 2666 goto error;
65501a74
AW
2667 }
2668
1a22aca1 2669 trace_vfio_realize(vdev->vbasedev.name, groupid);
65501a74 2670
1a22aca1 2671 group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
65501a74 2672 if (!group) {
426ec904 2673 goto error;
65501a74
AW
2674 }
2675
b47d8efa
EA
2676 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2677 if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
1a22aca1 2678 error_setg(errp, "device is already attached");
65501a74 2679 vfio_put_group(group);
426ec904 2680 goto error;
65501a74
AW
2681 }
2682 }
2683
1a22aca1 2684 ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
65501a74 2685 if (ret) {
65501a74 2686 vfio_put_group(group);
426ec904 2687 goto error;
65501a74
AW
2688 }
2689
e04cff9d
EA
2690 vfio_populate_device(vdev, &err);
2691 if (err) {
2692 error_propagate(errp, err);
2312d907 2693 goto error;
217e9fdc
PB
2694 }
2695
65501a74 2696 /* Get a copy of config space */
5546a621 2697 ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
65501a74
AW
2698 MIN(pci_config_size(&vdev->pdev), vdev->config_size),
2699 vdev->config_offset);
2700 if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
2701 ret = ret < 0 ? -errno : -EFAULT;
1a22aca1 2702 error_setg_errno(errp, -ret, "failed to read device config space");
426ec904 2703 goto error;
65501a74
AW
2704 }
2705
4b5d5e87
AW
2706 /* vfio emulates a lot for us, but some bits need extra love */
2707 vdev->emulated_config_bits = g_malloc0(vdev->config_size);
2708
2709 /* QEMU can choose to expose the ROM or not */
2710 memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
2711
89dcccc5
AW
2712 /*
2713 * The PCI spec reserves vendor ID 0xffff as an invalid value. The
2714 * device ID is managed by the vendor and need only be a 16-bit value.
2715 * Allow any 16-bit value for subsystem so they can be hidden or changed.
2716 */
2717 if (vdev->vendor_id != PCI_ANY_ID) {
2718 if (vdev->vendor_id >= 0xffff) {
1a22aca1 2719 error_setg(errp, "invalid PCI vendor ID provided");
426ec904 2720 goto error;
89dcccc5
AW
2721 }
2722 vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
2723 trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
2724 } else {
2725 vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2726 }
2727
2728 if (vdev->device_id != PCI_ANY_ID) {
2729 if (vdev->device_id > 0xffff) {
1a22aca1 2730 error_setg(errp, "invalid PCI device ID provided");
426ec904 2731 goto error;
89dcccc5
AW
2732 }
2733 vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
2734 trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
2735 } else {
2736 vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2737 }
2738
2739 if (vdev->sub_vendor_id != PCI_ANY_ID) {
2740 if (vdev->sub_vendor_id > 0xffff) {
1a22aca1 2741 error_setg(errp, "invalid PCI subsystem vendor ID provided");
426ec904 2742 goto error;
89dcccc5
AW
2743 }
2744 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
2745 vdev->sub_vendor_id, ~0);
2746 trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
2747 vdev->sub_vendor_id);
2748 }
2749
2750 if (vdev->sub_device_id != PCI_ANY_ID) {
2751 if (vdev->sub_device_id > 0xffff) {
1a22aca1 2752 error_setg(errp, "invalid PCI subsystem device ID provided");
426ec904 2753 goto error;
89dcccc5
AW
2754 }
2755 vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
2756 trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
2757 vdev->sub_device_id);
2758 }
ff635e37 2759
4b5d5e87
AW
2760 /* QEMU can change multi-function devices to single function, or reverse */
2761 vdev->emulated_config_bits[PCI_HEADER_TYPE] =
2762 PCI_HEADER_TYPE_MULTI_FUNCTION;
2763
187d6232
AW
2764 /* Restore or clear multifunction, this is always controlled by QEMU */
2765 if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
2766 vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
2767 } else {
2768 vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
2769 }
2770
65501a74
AW
2771 /*
2772 * Clear host resource mapping info. If we choose not to register a
2773 * BAR, such as might be the case with the option ROM, we can get
2774 * confusing, unwritable, residual addresses from the host here.
2775 */
2776 memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
2777 memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
2778
6f864e6e 2779 vfio_pci_size_rom(vdev);
65501a74 2780
ec3bcf42
EA
2781 vfio_msix_early_setup(vdev, &err);
2782 if (err) {
2783 error_propagate(errp, err);
008d0e2d 2784 goto error;
65501a74
AW
2785 }
2786
2d82f8a3 2787 vfio_bars_setup(vdev);
65501a74 2788
1a22aca1 2789 ret = vfio_add_capabilities(vdev, errp);
65501a74
AW
2790 if (ret) {
2791 goto out_teardown;
2792 }
2793
182bca45
AW
2794 if (vdev->vga) {
2795 vfio_vga_quirk_setup(vdev);
2796 }
2797
581406e0
AW
2798 for (i = 0; i < PCI_ROM_SLOT; i++) {
2799 vfio_bar_quirk_setup(vdev, i);
2800 }
2801
6ced0bba
AW
2802 if (!vdev->igd_opregion &&
2803 vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
2804 struct vfio_region_info *opregion;
2805
2806 if (vdev->pdev.qdev.hotplugged) {
1a22aca1 2807 error_setg(errp,
426ec904
EA
2808 "cannot support IGD OpRegion feature on hotplugged "
2809 "device");
6ced0bba
AW
2810 goto out_teardown;
2811 }
2812
2813 ret = vfio_get_dev_region_info(&vdev->vbasedev,
2814 VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
2815 VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
2816 if (ret) {
1a22aca1 2817 error_setg_errno(errp, -ret,
426ec904 2818 "does not support requested IGD OpRegion feature");
6ced0bba
AW
2819 goto out_teardown;
2820 }
2821
1a22aca1 2822 ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
6ced0bba
AW
2823 g_free(opregion);
2824 if (ret) {
6ced0bba
AW
2825 goto out_teardown;
2826 }
2827 }
2828
4b5d5e87
AW
2829 /* QEMU emulates all of MSI & MSIX */
2830 if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
2831 memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
2832 MSIX_CAP_LENGTH);
2833 }
2834
2835 if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
2836 memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
2837 vdev->msi_cap_size);
2838 }
2839
65501a74 2840 if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
bc72ad67 2841 vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
ea486926 2842 vfio_intx_mmap_enable, vdev);
870cb6f1 2843 pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
1a22aca1 2844 ret = vfio_intx_enable(vdev, errp);
65501a74
AW
2845 if (ret) {
2846 goto out_teardown;
2847 }
2848 }
2849
7b4b0e9e 2850 vfio_register_err_notifier(vdev);
47cbe50c 2851 vfio_register_req_notifier(vdev);
c9c50009 2852 vfio_setup_resetfn_quirk(vdev);
c29029dd 2853
1a22aca1 2854 return;
65501a74
AW
2855
2856out_teardown:
2857 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
2858 vfio_teardown_msi(vdev);
2d82f8a3 2859 vfio_bars_exit(vdev);
426ec904 2860error:
1a22aca1 2861 error_prepend(errp, ERR_PREFIX, vdev->vbasedev.name);
77a10d04
PB
2862}
2863
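/*
 * Standalone sketch of the group discovery performed in vfio_realize():
 * the device's iommu_group symlink points at
 * /sys/kernel/iommu_groups/<id>, so the basename of the link target is
 * the group number. Helper name is invented; assumes <libgen.h>,
 * <limits.h>, and <unistd.h>.
 */
static int sysfs_iommu_groupid(const char *sysfsdev)
{
    char path[PATH_MAX], link[PATH_MAX];
    ssize_t len;
    int groupid;

    snprintf(path, sizeof(path), "%s/iommu_group", sysfsdev);
    len = readlink(path, link, sizeof(link) - 1);
    if (len < 0) {
        return -errno;
    }
    link[len] = '\0';

    if (sscanf(basename(link), "%d", &groupid) != 1) {
        return -EINVAL;
    }
    return groupid;
}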
2864static void vfio_instance_finalize(Object *obj)
2865{
2866 PCIDevice *pci_dev = PCI_DEVICE(obj);
2867 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
2868 VFIOGroup *group = vdev->vbasedev.group;
2869
2d82f8a3 2870 vfio_bars_finalize(vdev);
4b5d5e87 2871 g_free(vdev->emulated_config_bits);
77a10d04 2872 g_free(vdev->rom);
c4c45e94
AW
2873 /*
2874 * XXX Leaking igd_opregion is not an oversight; we can't remove the
2875 * fw_cfg entry, so leaking this allocation seems like the safest
2876 * option.
2877 *
2878 * g_free(vdev->igd_opregion);
2879 */
65501a74
AW
2880 vfio_put_device(vdev);
2881 vfio_put_group(group);
65501a74
AW
2882}
2883
2884static void vfio_exitfn(PCIDevice *pdev)
2885{
9ee27d73 2886 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
65501a74 2887
47cbe50c 2888 vfio_unregister_req_notifier(vdev);
7b4b0e9e 2889 vfio_unregister_err_notifier(vdev);
65501a74
AW
2890 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
2891 vfio_disable_interrupts(vdev);
ea486926 2892 if (vdev->intx.mmap_timer) {
bc72ad67 2893 timer_free(vdev->intx.mmap_timer);
ea486926 2894 }
65501a74 2895 vfio_teardown_msi(vdev);
2d82f8a3 2896 vfio_bars_exit(vdev);
65501a74
AW
2897}
2898
2899static void vfio_pci_reset(DeviceState *dev)
2900{
2901 PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
9ee27d73 2902 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
65501a74 2903
df92ee44 2904 trace_vfio_pci_reset(vdev->vbasedev.name);
5834a83f 2905
f16f39c3 2906 vfio_pci_pre_reset(vdev);
ba661818 2907
5655f931
AW
2908 if (vdev->resetfn && !vdev->resetfn(vdev)) {
2909 goto post_reset;
2910 }
2911
b47d8efa
EA
2912 if (vdev->vbasedev.reset_works &&
2913 (vdev->has_flr || !vdev->has_pm_reset) &&
5546a621 2914 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
df92ee44 2915 trace_vfio_pci_reset_flr(vdev->vbasedev.name);
f16f39c3 2916 goto post_reset;
ba661818
AW
2917 }
2918
f16f39c3
AW
2919 /* See if we can do our own bus reset */
2920 if (!vfio_pci_hot_reset_one(vdev)) {
2921 goto post_reset;
2922 }
5834a83f 2923
f16f39c3 2924 /* If nothing else works and the device supports PM reset, use it */
b47d8efa 2925 if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
5546a621 2926 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
df92ee44 2927 trace_vfio_pci_reset_pm(vdev->vbasedev.name);
f16f39c3 2928 goto post_reset;
65501a74 2929 }
5834a83f 2930
f16f39c3
AW
2931post_reset:
2932 vfio_pci_post_reset(vdev);
65501a74
AW
2933}
2934
abc5b3bf
GA
2935static void vfio_instance_init(Object *obj)
2936{
2937 PCIDevice *pci_dev = PCI_DEVICE(obj);
9ee27d73 2938 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));
abc5b3bf
GA
2939
2940 device_add_bootindex_property(obj, &vdev->bootindex,
2941 "bootindex", NULL,
2942 &pci_dev->qdev, NULL);
4a946268
EA
2943 vdev->host.domain = ~0U;
2944 vdev->host.bus = ~0U;
2945 vdev->host.slot = ~0U;
2946 vdev->host.function = ~0U;
abc5b3bf
GA
2947}
2948
65501a74 2949static Property vfio_pci_dev_properties[] = {
9ee27d73 2950 DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
7df9381b 2951 DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
9ee27d73 2952 DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
ea486926 2953 intx.mmap_timeout, 1100),
9ee27d73 2954 DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
f15689c7 2955 VFIO_FEATURE_ENABLE_VGA_BIT, false),
47cbe50c
AW
2956 DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
2957 VFIO_FEATURE_ENABLE_REQ_BIT, true),
6ced0bba
AW
2958 DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
2959 VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
5e15d79b 2960 DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
46746dba
AW
2961 DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
2962 DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
2963 DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
89dcccc5
AW
2964 DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
2965 DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
2966 DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
2967 sub_vendor_id, PCI_ANY_ID),
2968 DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
2969 sub_device_id, PCI_ANY_ID),
c4c45e94 2970 DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
65501a74
AW
2971 /*
2972 * TODO - support passed fds... is this necessary?
9ee27d73
EA
2973 * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
2974 * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name),
65501a74
AW
2975 */
2976 DEFINE_PROP_END_OF_LIST(),
2977};
2978
d9f0e638
AW
2979static const VMStateDescription vfio_pci_vmstate = {
2980 .name = "vfio-pci",
2981 .unmigratable = 1,
2982};
65501a74
AW
2983
2984static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
2985{
2986 DeviceClass *dc = DEVICE_CLASS(klass);
2987 PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
2988
2989 dc->reset = vfio_pci_reset;
2990 dc->props = vfio_pci_dev_properties;
d9f0e638
AW
2991 dc->vmsd = &vfio_pci_vmstate;
2992 dc->desc = "VFIO-based PCI device assignment";
125ee0ed 2993 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
1a22aca1 2994 pdc->realize = vfio_realize;
65501a74
AW
2995 pdc->exit = vfio_exitfn;
2996 pdc->config_read = vfio_pci_read_config;
2997 pdc->config_write = vfio_pci_write_config;
6a659bbf 2998 pdc->is_express = 1; /* We might be */
65501a74
AW
2999}
3000
3001static const TypeInfo vfio_pci_dev_info = {
3002 .name = "vfio-pci",
3003 .parent = TYPE_PCI_DEVICE,
9ee27d73 3004 .instance_size = sizeof(VFIOPCIDevice),
65501a74 3005 .class_init = vfio_pci_dev_class_init,
abc5b3bf 3006 .instance_init = vfio_instance_init,
77a10d04 3007 .instance_finalize = vfio_instance_finalize,
65501a74
AW
3008};
3009
3010static void register_vfio_pci_dev_type(void)
3011{
3012 type_register_static(&vfio_pci_dev_info);
3013}
3014
3015type_init(register_vfio_pci_dev_type)