2 * vfio based device assignment support
4 * Copyright Red Hat, Inc. 2012
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
12 * Based on qemu-kvm device-assignment:
13 * Adapted for KVM by Qumranet.
22 #include <linux/vfio.h>
23 #include <sys/ioctl.h>
26 #include <sys/types.h>
30 #include "exec/address-spaces.h"
31 #include "exec/memory.h"
32 #include "hw/pci/msi.h"
33 #include "hw/pci/msix.h"
34 #include "hw/pci/pci.h"
35 #include "qemu-common.h"
36 #include "qemu/error-report.h"
37 #include "qemu/event_notifier.h"
38 #include "qemu/queue.h"
39 #include "qemu/range.h"
40 #include "sysemu/kvm.h"
41 #include "sysemu/sysemu.h"
43 #include "hw/vfio/vfio.h"
45 /* Extra debugging, trap acceleration paths for more logging */
46 #define VFIO_ALLOW_MMAP 1
47 #define VFIO_ALLOW_KVM_INTX 1
48 #define VFIO_ALLOW_KVM_MSI 1
49 #define VFIO_ALLOW_KVM_MSIX 1
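/*
 * For example, building with VFIO_ALLOW_KVM_MSIX set to 0 keeps MSI-X
 * injection on the QEMU eventfd path (no KVM virq is ever allocated), so
 * vfio_msi_interrupt() and its tracepoint run for every vector.
 * Illustrative debug use only; the defaults above enable all acceleration.
 */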
52 VFIO_DEVICE_TYPE_PCI = 0,
57 typedef struct VFIOQuirk {
59 struct VFIOPCIDevice *vdev;
60 QLIST_ENTRY(VFIOQuirk) next;
62 uint32_t base_offset:TARGET_PAGE_BITS;
63 uint32_t address_offset:TARGET_PAGE_BITS;
64 uint32_t address_size:3;
67 uint32_t address_match;
68 uint32_t address_mask;
70 uint32_t address_val:TARGET_PAGE_BITS;
71 uint32_t data_offset:TARGET_PAGE_BITS;
80 typedef struct VFIORegion {
81 struct VFIODevice *vbasedev;
82 off_t fd_offset; /* offset of region within device fd */
83 MemoryRegion mem; /* slow, read/write access */
84 MemoryRegion mmap_mem; /* direct mapped access */
87 uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
88 uint8_t nr; /* cache the region number for debug */
91 typedef struct VFIOBAR {
95 QLIST_HEAD(, VFIOQuirk) quirks;
98 typedef struct VFIOVGARegion {
102 QLIST_HEAD(, VFIOQuirk) quirks;
105 typedef struct VFIOVGA {
108 VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
111 typedef struct VFIOINTx {
112 bool pending; /* interrupt pending */
113 bool kvm_accel; /* set when QEMU bypass through KVM enabled */
114 uint8_t pin; /* which pin to pull for qemu_set_irq */
115 EventNotifier interrupt; /* eventfd triggered on interrupt */
116 EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
117 PCIINTxRoute route; /* routing info for QEMU bypass */
118 uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
119 QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
122 typedef struct VFIOMSIVector {
124 * Two interrupt paths are configured per vector. The first is only used
125 * for interrupts injected via QEMU. This is typically the non-accel path,
126 * but may also be used when we want QEMU to handle masking and pending
127 * bits. The KVM path bypasses QEMU and is therefore higher performance,
128 * but requires masking at the device. virq is used to track the MSI route
129 * through KVM, thus kvm_interrupt is only available when virq is set to a
130 * valid (>= 0) value.
132 EventNotifier interrupt;
133 EventNotifier kvm_interrupt;
134 struct VFIOPCIDevice *vdev; /* back pointer to device */
146 typedef struct VFIOAddressSpace {
148 QLIST_HEAD(, VFIOContainer) containers;
149 QLIST_ENTRY(VFIOAddressSpace) list;
152 static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
153 QLIST_HEAD_INITIALIZER(vfio_address_spaces);
157 typedef struct VFIOType1 {
158 MemoryListener listener;
163 typedef struct VFIOContainer {
164 VFIOAddressSpace *space;
165 int fd; /* /dev/vfio/vfio, empowered by the attached groups */
167 /* enable abstraction to support various iommu backends */
171 void (*release)(struct VFIOContainer *);
173 QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
174 QLIST_HEAD(, VFIOGroup) group_list;
175 QLIST_ENTRY(VFIOContainer) next;
178 typedef struct VFIOGuestIOMMU {
179 VFIOContainer *container;
182 QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
185 /* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
186 typedef struct VFIOMSIXInfo {
190 uint32_t table_offset;
192 MemoryRegion mmap_mem;
196 typedef struct VFIODeviceOps VFIODeviceOps;
198 typedef struct VFIODevice {
199 QLIST_ENTRY(VFIODevice) next;
200 struct VFIOGroup *group;
207 unsigned int num_irqs;
208 unsigned int num_regions;
212 struct VFIODeviceOps {
213 void (*vfio_compute_needs_reset)(VFIODevice *vdev);
214 int (*vfio_hot_reset_multi)(VFIODevice *vdev);
215 void (*vfio_eoi)(VFIODevice *vdev);
216 int (*vfio_populate_device)(VFIODevice *vdev);
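/*
 * A bus backend fills in one of these ops tables for its devices.  A minimal
 * sketch using only callbacks defined in this file (the table name is
 * illustrative; the reset-related hooks would be wired up the same way):
 *
 *   static VFIODeviceOps vfio_pci_ops_sketch = {
 *       .vfio_eoi = vfio_eoi,
 *       .vfio_populate_device = vfio_populate_device,
 *   };
 */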
219 typedef struct VFIOPCIDevice {
223 unsigned int config_size;
224 uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
225 off_t config_offset; /* Offset of config space region within device fd */
226 unsigned int rom_size;
227 off_t rom_offset; /* Offset of ROM region within device fd */
230 VFIOMSIVector *msi_vectors;
232 int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
233 int interrupt; /* Current interrupt type */
234 VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
235 VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
236 PCIHostDeviceAddress host;
237 EventNotifier err_notifier;
239 #define VFIO_FEATURE_ENABLE_VGA_BIT 0
240 #define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
247 bool rom_read_failed;
250 typedef struct VFIOGroup {
253 VFIOContainer *container;
254 QLIST_HEAD(, VFIODevice) device_list;
255 QLIST_ENTRY(VFIOGroup) next;
256 QLIST_ENTRY(VFIOGroup) container_next;
259 typedef struct VFIORomBlacklistEntry {
262 } VFIORomBlacklistEntry;
265 * List of device ids/vendor ids for which to disable
266 * option rom loading. This avoids guest hangs during rom
267 * execution, as noticed with the BCM 57810 card, for lack of a
268 * better way to handle such issues.
269 * The user can still override by specifying a romfile or rombar=1.
271 * Please see https://bugs.launchpad.net/qemu/+bug/1284874
272 * for an analysis of the 57810 card hang. When adding
273 * a new vendor id/device id combination below, please also add
274 * your card/environment details and information that could
275 * help in debugging to the bug tracking this issue
277 static const VFIORomBlacklistEntry romblacklist[] = {
278 /* Broadcom BCM 57810 */
282 #define MSIX_CAP_LENGTH 12
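/*
 * 12 bytes covers the full MSI-X capability: ID (1) + next pointer (1) +
 * message control (2) + table offset/BIR (4) + PBA offset/BIR (4).
 */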
284 static QLIST_HEAD(, VFIOGroup)
285 vfio_group_list = QLIST_HEAD_INITIALIZER(vfio_group_list);
289 * We have a single VFIO pseudo device per KVM VM. Once created it lives
290 * for the life of the VM. Closing the file descriptor only drops our
291 * reference to it and the device's reference to kvm. Therefore once
292 * initialized, this file descriptor is only released on QEMU exit and
293 * we'll re-use it should another vfio device be attached before then.
295 static int vfio_kvm_device_fd = -1;
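/*
 * Sketch of how the pseudo device gets created (the real code lives in the
 * group attach path, not shown in this excerpt):
 *
 *   struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
 *
 *   if (!kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
 *       vfio_kvm_device_fd = cd.fd;
 *   }
 */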
298 static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
299 static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
300 static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
301 uint32_t val, int len);
302 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
303 static void vfio_put_base_device(VFIODevice *vbasedev);
304 static int vfio_populate_device(VFIODevice *vbasedev);
307 * Common VFIO interrupt disable
309 static void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
311 struct vfio_irq_set irq_set = {
312 .argsz = sizeof(irq_set),
313 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
319 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
325 static void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
327 struct vfio_irq_set irq_set = {
328 .argsz = sizeof(irq_set),
329 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
335 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
338 #ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
339 static void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
341 struct vfio_irq_set irq_set = {
342 .argsz = sizeof(irq_set),
343 .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
349 ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
354 * Disabling BAR mmapping can be slow, but toggling it around INTx can
355 * also be a huge overhead. We try to get the best of both worlds by
356 * waiting until an interrupt occurs to disable mmaps (subsequent transitions
357 * to the same state are effectively no overhead). If the interrupt has
358 * been serviced and the time gap is long enough, we re-enable mmaps for
359 * performance. This works well for things like graphics cards, which
360 * may not use their interrupt at all and are penalized to an unusable
361 * level by read/write BAR traps. Other devices, like NICs, have more
362 * regular interrupts and see much better latency by staying in non-mmap
363 * mode. We therefore set the default mmap_timeout such that a ping
364 * is just enough to keep the mmap disabled. Users can experiment with
365 * other options with the x-intx-mmap-timeout-ms parameter (a value of
366 * zero disables the timer).
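 *
 * Example invocation (the host address is purely illustrative):
 *   -device vfio-pci,host=01:00.0,x-intx-mmap-timeout-ms=2000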
368 static void vfio_intx_mmap_enable(void *opaque)
370 VFIOPCIDevice *vdev = opaque;
372 if (vdev->intx.pending) {
373 timer_mod(vdev->intx.mmap_timer,
374 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
378 vfio_mmap_set_enabled(vdev, true);
381 static void vfio_intx_interrupt(void *opaque)
383 VFIOPCIDevice *vdev = opaque;
385 if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
389 trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);
391 vdev->intx.pending = true;
392 pci_irq_assert(&vdev->pdev);
393 vfio_mmap_set_enabled(vdev, false);
394 if (vdev->intx.mmap_timeout) {
395 timer_mod(vdev->intx.mmap_timer,
396 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
400 static void vfio_eoi(VFIODevice *vbasedev)
402 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
404 if (!vdev->intx.pending) {
408 trace_vfio_eoi(vbasedev->name);
410 vdev->intx.pending = false;
411 pci_irq_deassert(&vdev->pdev);
412 vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
415 static void vfio_enable_intx_kvm(VFIOPCIDevice *vdev)
418 struct kvm_irqfd irqfd = {
419 .fd = event_notifier_get_fd(&vdev->intx.interrupt),
420 .gsi = vdev->intx.route.irq,
421 .flags = KVM_IRQFD_FLAG_RESAMPLE,
423 struct vfio_irq_set *irq_set;
427 if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
428 vdev->intx.route.mode != PCI_INTX_ENABLED ||
429 !kvm_resamplefds_enabled()) {
433 /* Get to a known interrupt state */
434 qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
435 vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
436 vdev->intx.pending = false;
437 pci_irq_deassert(&vdev->pdev);
439 /* Get an eventfd for resample/unmask */
440 if (event_notifier_init(&vdev->intx.unmask, 0)) {
441 error_report("vfio: Error: event_notifier_init failed eoi");
445 /* KVM triggers it, VFIO listens for it */
446 irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
448 if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
449 error_report("vfio: Error: Failed to setup resample irqfd: %m");
453 argsz = sizeof(*irq_set) + sizeof(*pfd);
455 irq_set = g_malloc0(argsz);
456 irq_set->argsz = argsz;
457 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
458 irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
461 pfd = (int32_t *)&irq_set->data;
463 *pfd = irqfd.resamplefd;
465 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
468 error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
473 vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
475 vdev->intx.kvm_accel = true;
477 trace_vfio_enable_intx_kvm(vdev->vbasedev.name);
482 irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
483 kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
485 event_notifier_cleanup(&vdev->intx.unmask);
487 qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
488 vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
492 static void vfio_disable_intx_kvm(VFIOPCIDevice *vdev)
495 struct kvm_irqfd irqfd = {
496 .fd = event_notifier_get_fd(&vdev->intx.interrupt),
497 .gsi = vdev->intx.route.irq,
498 .flags = KVM_IRQFD_FLAG_DEASSIGN,
501 if (!vdev->intx.kvm_accel) {
506 * Get to a known state, hardware masked, QEMU ready to accept new
507 * interrupts, QEMU IRQ de-asserted.
509 vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
510 vdev->intx.pending = false;
511 pci_irq_deassert(&vdev->pdev);
513 /* Tell KVM to stop listening for an INTx irqfd */
514 if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
515 error_report("vfio: Error: Failed to disable INTx irqfd: %m");
518 /* We only need to close the eventfd for VFIO to cleanup the kernel side */
519 event_notifier_cleanup(&vdev->intx.unmask);
521 /* QEMU starts listening for interrupt events. */
522 qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
524 vdev->intx.kvm_accel = false;
526 /* If we've missed an event, let it re-fire through QEMU */
527 vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
529 trace_vfio_disable_intx_kvm(vdev->vbasedev.name);
533 static void vfio_update_irq(PCIDevice *pdev)
535 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
538 if (vdev->interrupt != VFIO_INT_INTx) {
542 route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
544 if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
545 return; /* Nothing changed */
548 trace_vfio_update_irq(vdev->vbasedev.name,
549 vdev->intx.route.irq, route.irq);
551 vfio_disable_intx_kvm(vdev);
553 vdev->intx.route = route;
555 if (route.mode != PCI_INTX_ENABLED) {
559 vfio_enable_intx_kvm(vdev);
561 /* Re-enable the interrupt in case we missed an EOI */
562 vfio_eoi(&vdev->vbasedev);
565 static int vfio_enable_intx(VFIOPCIDevice *vdev)
567 uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
569 struct vfio_irq_set *irq_set;
576 vfio_disable_interrupts(vdev);
578 vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
579 pci_config_set_interrupt_pin(vdev->pdev.config, pin);
583 * Only conditional to avoid generating error messages on platforms
584 * where we won't actually use the result anyway.
586 if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
587 vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
592 ret = event_notifier_init(&vdev->intx.interrupt, 0);
594 error_report("vfio: Error: event_notifier_init failed");
598 argsz = sizeof(*irq_set) + sizeof(*pfd);
600 irq_set = g_malloc0(argsz);
601 irq_set->argsz = argsz;
602 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
603 irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
606 pfd = (int32_t *)&irq_set->data;
608 *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
609 qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);
611 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
614 error_report("vfio: Error: Failed to setup INTx fd: %m");
615 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
616 event_notifier_cleanup(&vdev->intx.interrupt);
620 vfio_enable_intx_kvm(vdev);
622 vdev->interrupt = VFIO_INT_INTx;
624 trace_vfio_enable_intx(vdev->vbasedev.name);
629 static void vfio_disable_intx(VFIOPCIDevice *vdev)
633 timer_del(vdev->intx.mmap_timer);
634 vfio_disable_intx_kvm(vdev);
635 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
636 vdev->intx.pending = false;
637 pci_irq_deassert(&vdev->pdev);
638 vfio_mmap_set_enabled(vdev, true);
640 fd = event_notifier_get_fd(&vdev->intx.interrupt);
641 qemu_set_fd_handler(fd, NULL, NULL, vdev);
642 event_notifier_cleanup(&vdev->intx.interrupt);
644 vdev->interrupt = VFIO_INT_NONE;
646 trace_vfio_disable_intx(vdev->vbasedev.name);
652 static void vfio_msi_interrupt(void *opaque)
654 VFIOMSIVector *vector = opaque;
655 VFIOPCIDevice *vdev = vector->vdev;
656 int nr = vector - vdev->msi_vectors;
658 if (!event_notifier_test_and_clear(&vector->interrupt)) {
665 if (vdev->interrupt == VFIO_INT_MSIX) {
666 msg = msix_get_message(&vdev->pdev, nr);
667 } else if (vdev->interrupt == VFIO_INT_MSI) {
668 msg = msi_get_message(&vdev->pdev, nr);
673 trace_vfio_msi_interrupt(vbasedev->name, nr, msg.address, msg.data);
676 if (vdev->interrupt == VFIO_INT_MSIX) {
677 msix_notify(&vdev->pdev, nr);
678 } else if (vdev->interrupt == VFIO_INT_MSI) {
679 msi_notify(&vdev->pdev, nr);
681 error_report("vfio: MSI interrupt received, but not enabled?");
685 static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
687 struct vfio_irq_set *irq_set;
688 int ret = 0, i, argsz;
691 argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));
693 irq_set = g_malloc0(argsz);
694 irq_set->argsz = argsz;
695 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
696 irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
698 irq_set->count = vdev->nr_vectors;
699 fds = (int32_t *)&irq_set->data;
701 for (i = 0; i < vdev->nr_vectors; i++) {
705 * MSI vs MSI-X - The guest has direct access to MSI mask and pending
706 * bits, therefore we always use the KVM signaling path when it is set up.
707 * MSI-X mask and pending bits are emulated, so we want to use the
708 * KVM signaling path only when configured and unmasked.
710 if (vdev->msi_vectors[i].use) {
711 if (vdev->msi_vectors[i].virq < 0 ||
712 (msix && msix_is_masked(&vdev->pdev, i))) {
713 fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
715 fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
722 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
729 static void vfio_add_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage *msg,
734 if ((msix && !VFIO_ALLOW_KVM_MSIX) ||
735 (!msix && !VFIO_ALLOW_KVM_MSI) || !msg) {
739 if (event_notifier_init(&vector->kvm_interrupt, 0)) {
743 virq = kvm_irqchip_add_msi_route(kvm_state, *msg);
745 event_notifier_cleanup(&vector->kvm_interrupt);
749 if (kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->kvm_interrupt,
751 kvm_irqchip_release_virq(kvm_state, virq);
752 event_notifier_cleanup(&vector->kvm_interrupt);
759 static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
761 kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->kvm_interrupt,
763 kvm_irqchip_release_virq(kvm_state, vector->virq);
765 event_notifier_cleanup(&vector->kvm_interrupt);
768 static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg)
770 kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
773 static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
774 MSIMessage *msg, IOHandler *handler)
776 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
777 VFIOMSIVector *vector;
780 trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);
782 vector = &vdev->msi_vectors[nr];
787 if (event_notifier_init(&vector->interrupt, 0)) {
788 error_report("vfio: Error: event_notifier_init failed");
791 msix_vector_use(pdev, nr);
794 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
795 handler, NULL, vector);
798 * Attempt to enable route through KVM irqchip,
799 * default to userspace handling if unavailable.
801 if (vector->virq >= 0) {
803 vfio_remove_kvm_msi_virq(vector);
805 vfio_update_kvm_msi_virq(vector, *msg);
808 vfio_add_kvm_msi_virq(vector, msg, true);
812 * We don't want to have the host allocate all possible MSI vectors
813 * for a device if they're not in use, so we shutdown and incrementally
814 * increase them as needed.
816 if (vdev->nr_vectors < nr + 1) {
817 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
818 vdev->nr_vectors = nr + 1;
819 ret = vfio_enable_vectors(vdev, true);
821 error_report("vfio: failed to enable vectors, %d", ret);
825 struct vfio_irq_set *irq_set;
828 argsz = sizeof(*irq_set) + sizeof(*pfd);
830 irq_set = g_malloc0(argsz);
831 irq_set->argsz = argsz;
832 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
833 VFIO_IRQ_SET_ACTION_TRIGGER;
834 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
837 pfd = (int32_t *)&irq_set->data;
839 if (vector->virq >= 0) {
840 *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
842 *pfd = event_notifier_get_fd(&vector->interrupt);
845 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
848 error_report("vfio: failed to modify vector, %d", ret);
855 static int vfio_msix_vector_use(PCIDevice *pdev,
856 unsigned int nr, MSIMessage msg)
858 return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
861 static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
863 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
864 VFIOMSIVector *vector = &vdev->msi_vectors[nr];
866 trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);
869 * There are still old guests that mask and unmask vectors on every
870 * interrupt. If we're using QEMU bypass with a KVM irqfd, leave all of
871 * the KVM setup in place, simply switch VFIO to use the non-bypass
872 * eventfd. We'll then fire the interrupt through QEMU and the MSI-X
873 * core will mask the interrupt and set pending bits, allowing it to
874 * be re-asserted on unmask. Nothing to do if already using QEMU mode.
876 if (vector->virq >= 0) {
878 struct vfio_irq_set *irq_set;
881 argsz = sizeof(*irq_set) + sizeof(*pfd);
883 irq_set = g_malloc0(argsz);
884 irq_set->argsz = argsz;
885 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
886 VFIO_IRQ_SET_ACTION_TRIGGER;
887 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
890 pfd = (int32_t *)&irq_set->data;
892 *pfd = event_notifier_get_fd(&vector->interrupt);
894 ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
900 static void vfio_enable_msix(VFIOPCIDevice *vdev)
902 vfio_disable_interrupts(vdev);
904 vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));
906 vdev->interrupt = VFIO_INT_MSIX;
909 * Some communication channels between VF & PF or PF & fw rely on the
910 * physical state of the device and expect that enabling MSI-X from the
911 * guest enables the same on the host. When our guest is Linux, the
912 * guest driver call to pci_enable_msix() sets the enabling bit in the
913 * MSI-X capability, but leaves the vector table masked. We therefore
914 * can't rely on a vector_use callback (from request_irq() in the guest)
915 * to switch the physical device into MSI-X mode because that may come a
916 * long time after pci_enable_msix(). This code enables vector 0 with
917 * triggering to userspace, then immediately releases the vector, leaving
918 * the physical device with no vectors enabled, but MSI-X enabled, just
919 * like the guest view.
921 vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
922 vfio_msix_vector_release(&vdev->pdev, 0);
924 if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
925 vfio_msix_vector_release, NULL)) {
926 error_report("vfio: msix_set_vector_notifiers failed");
929 trace_vfio_enable_msix(vdev->vbasedev.name);
932 static void vfio_enable_msi(VFIOPCIDevice *vdev)
936 vfio_disable_interrupts(vdev);
938 vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
940 vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));
942 for (i = 0; i < vdev->nr_vectors; i++) {
943 VFIOMSIVector *vector = &vdev->msi_vectors[i];
944 MSIMessage msg = msi_get_message(&vdev->pdev, i);
950 if (event_notifier_init(&vector->interrupt, 0)) {
951 error_report("vfio: Error: event_notifier_init failed");
954 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
955 vfio_msi_interrupt, NULL, vector);
958 * Attempt to enable route through KVM irqchip,
959 * default to userspace handling if unavailable.
961 vfio_add_kvm_msi_virq(vector, &msg, false);
964 /* Set interrupt type prior to possible interrupts */
965 vdev->interrupt = VFIO_INT_MSI;
967 ret = vfio_enable_vectors(vdev, false);
970 error_report("vfio: Error: Failed to setup MSI fds: %m");
971 } else if (ret != vdev->nr_vectors) {
972 error_report("vfio: Error: Failed to enable %d "
973 "MSI vectors, retry with %d", vdev->nr_vectors, ret);
976 for (i = 0; i < vdev->nr_vectors; i++) {
977 VFIOMSIVector *vector = &vdev->msi_vectors[i];
978 if (vector->virq >= 0) {
979 vfio_remove_kvm_msi_virq(vector);
981 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
983 event_notifier_cleanup(&vector->interrupt);
986 g_free(vdev->msi_vectors);
988 if (ret > 0 && ret != vdev->nr_vectors) {
989 vdev->nr_vectors = ret;
992 vdev->nr_vectors = 0;
995 * Failing to set up MSI doesn't really fall within any specification.
996 * Let's try leaving interrupts disabled and hope the guest figures
997 * out to fall back to INTx for this device.
999 error_report("vfio: Error: Failed to enable MSI");
1000 vdev->interrupt = VFIO_INT_NONE;
1005 trace_vfio_enable_msi(vdev->vbasedev.name, vdev->nr_vectors);
1008 static void vfio_disable_msi_common(VFIOPCIDevice *vdev)
1012 for (i = 0; i < vdev->nr_vectors; i++) {
1013 VFIOMSIVector *vector = &vdev->msi_vectors[i];
1014 if (vdev->msi_vectors[i].use) {
1015 if (vector->virq >= 0) {
1016 vfio_remove_kvm_msi_virq(vector);
1018 qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
1020 event_notifier_cleanup(&vector->interrupt);
1024 g_free(vdev->msi_vectors);
1025 vdev->msi_vectors = NULL;
1026 vdev->nr_vectors = 0;
1027 vdev->interrupt = VFIO_INT_NONE;
1029 vfio_enable_intx(vdev);
1032 static void vfio_disable_msix(VFIOPCIDevice *vdev)
1036 msix_unset_vector_notifiers(&vdev->pdev);
1039 * MSI-X will only release vectors if MSI-X is still enabled on the
1040 * device, so check through the rest and release them ourselves if necessary.
1042 for (i = 0; i < vdev->nr_vectors; i++) {
1043 if (vdev->msi_vectors[i].use) {
1044 vfio_msix_vector_release(&vdev->pdev, i);
1045 msix_vector_unuse(&vdev->pdev, i);
1049 if (vdev->nr_vectors) {
1050 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
1053 vfio_disable_msi_common(vdev);
1055 trace_vfio_disable_msix(vdev->vbasedev.name);
1058 static void vfio_disable_msi(VFIOPCIDevice *vdev)
1060 vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
1061 vfio_disable_msi_common(vdev);
1063 trace_vfio_disable_msi(vdev->vbasedev.name);
1066 static void vfio_update_msi(VFIOPCIDevice *vdev)
1070 for (i = 0; i < vdev->nr_vectors; i++) {
1071 VFIOMSIVector *vector = &vdev->msi_vectors[i];
1074 if (!vector->use || vector->virq < 0) {
1078 msg = msi_get_message(&vdev->pdev, i);
1079 vfio_update_kvm_msi_virq(vector, msg);
1084 * IO Port/MMIO - Beware of endianness, VFIO is always little endian
1086 static void vfio_region_write(void *opaque, hwaddr addr,
1087 uint64_t data, unsigned size)
1089 VFIORegion *region = opaque;
1090 VFIODevice *vbasedev = region->vbasedev;
1103 buf.word = cpu_to_le16(data);
1106 buf.dword = cpu_to_le32(data);
1109 hw_error("vfio: unsupported write size, %d bytes", size);
1113 if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
1114 error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
1116 __func__, vbasedev->name, region->nr,
1120 trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);
1123 * A read or write to a BAR always signals an INTx EOI. This will
1124 * do nothing if not pending (including not in INTx mode). We assume
1125 * that a BAR access is in response to an interrupt and that BAR
1126 * accesses will service the interrupt. Unfortunately, we don't know
1127 * which access will service the interrupt, so we're potentially
1128 * getting quite a few host interrupts per guest interrupt.
1130 vbasedev->ops->vfio_eoi(vbasedev);
1133 static uint64_t vfio_region_read(void *opaque,
1134 hwaddr addr, unsigned size)
1136 VFIORegion *region = opaque;
1137 VFIODevice *vbasedev = region->vbasedev;
1146 if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
1147 error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
1148 __func__, vbasedev->name, region->nr,
1150 return (uint64_t)-1;
1158 data = le16_to_cpu(buf.word);
1161 data = le32_to_cpu(buf.dword);
1164 hw_error("vfio: unsupported read size, %d bytes", size);
1168 trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);
1170 /* Same as write above */
1171 vbasedev->ops->vfio_eoi(vbasedev);
1176 static const MemoryRegionOps vfio_region_ops = {
1177 .read = vfio_region_read,
1178 .write = vfio_region_write,
1179 .endianness = DEVICE_LITTLE_ENDIAN,
1182 static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
1184 struct vfio_region_info reg_info = {
1185 .argsz = sizeof(reg_info),
1186 .index = VFIO_PCI_ROM_REGION_INDEX
1192 if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
1193 error_report("vfio: Error getting ROM info: %m");
1197 trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info.size,
1198 (unsigned long)reg_info.offset,
1199 (unsigned long)reg_info.flags);
1201 vdev->rom_size = size = reg_info.size;
1202 vdev->rom_offset = reg_info.offset;
1204 if (!vdev->rom_size) {
1205 vdev->rom_read_failed = true;
1206 error_report("vfio-pci: Cannot read device rom at "
1207 "%s", vdev->vbasedev.name);
1208 error_printf("Device option ROM contents are probably invalid "
1209 "(check dmesg).\nSkip option ROM probe with rombar=0, "
1210 "or load from file with romfile=\n");
1214 vdev->rom = g_malloc(size);
1215 memset(vdev->rom, 0xff, size);
1218 bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
1219 size, vdev->rom_offset + off);
1222 } else if (bytes > 0) {
1226 if (errno == EINTR || errno == EAGAIN) {
1229 error_report("vfio: Error reading device ROM: %m");
1235 static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
1237 VFIOPCIDevice *vdev = opaque;
1246 /* Load the ROM lazily when the guest tries to read it */
1247 if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
1248 vfio_pci_load_rom(vdev);
1251 memcpy(&val, vdev->rom + addr,
1252 (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);
1259 data = le16_to_cpu(val.word);
1262 data = le32_to_cpu(val.dword);
1265 hw_error("vfio: unsupported read size, %d bytes\n", size);
1269 trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);
1274 static void vfio_rom_write(void *opaque, hwaddr addr,
1275 uint64_t data, unsigned size)
1279 static const MemoryRegionOps vfio_rom_ops = {
1280 .read = vfio_rom_read,
1281 .write = vfio_rom_write,
1282 .endianness = DEVICE_LITTLE_ENDIAN,
1285 static bool vfio_blacklist_opt_rom(VFIOPCIDevice *vdev)
1287 PCIDevice *pdev = &vdev->pdev;
1288 uint16_t vendor_id, device_id;
1291 vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
1292 device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
1294 while (count < ARRAY_SIZE(romblacklist)) {
1295 if (romblacklist[count].vendor_id == vendor_id &&
1296 romblacklist[count].device_id == device_id) {
1305 static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
1307 uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
1308 off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
1309 DeviceState *dev = DEVICE(vdev);
1311 int fd = vdev->vbasedev.fd;
1313 if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
1314 /* Since pci handles romfile, just print a message and return */
1315 if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
1316 error_printf("Warning: Device at %04x:%02x:%02x.%x "
1317 "is known to cause system instability issues during "
1318 "option rom execution. "
1319 "Proceeding anyway since user specified romfile\n",
1320 vdev->host.domain, vdev->host.bus, vdev->host.slot,
1321 vdev->host.function);
1327 * Use the same size ROM BAR as the physical device. The contents
1328 * will get filled in later when the guest tries to read it.
1330 if (pread(fd, &orig, 4, offset) != 4 ||
1331 pwrite(fd, &size, 4, offset) != 4 ||
1332 pread(fd, &size, 4, offset) != 4 ||
1333 pwrite(fd, &orig, 4, offset) != 4) {
1334 error_report("%s(%04x:%02x:%02x.%x) failed: %m",
1335 __func__, vdev->host.domain, vdev->host.bus,
1336 vdev->host.slot, vdev->host.function);
1340 size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
1346 if (vfio_blacklist_opt_rom(vdev)) {
1347 if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
1348 error_printf("Warning: Device at %04x:%02x:%02x.%x "
1349 "is known to cause system instability issues during "
1350 "option rom execution. "
1351 "Proceeding anyway since user specified non zero value for "
1353 vdev->host.domain, vdev->host.bus, vdev->host.slot,
1354 vdev->host.function);
1356 error_printf("Warning: Rom loading for device at "
1357 "%04x:%02x:%02x.%x has been disabled due to "
1358 "system instability issues. "
1359 "Specify rombar=1 or romfile to force\n",
1360 vdev->host.domain, vdev->host.bus, vdev->host.slot,
1361 vdev->host.function);
1366 trace_vfio_pci_size_rom(vdev->vbasedev.name, size);
1368 snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
1369 vdev->host.domain, vdev->host.bus, vdev->host.slot,
1370 vdev->host.function);
1372 memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
1373 &vfio_rom_ops, vdev, name, size);
1375 pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
1376 PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);
1378 vdev->pdev.has_rom = true;
1379 vdev->rom_read_failed = false;
1382 static void vfio_vga_write(void *opaque, hwaddr addr,
1383 uint64_t data, unsigned size)
1385 VFIOVGARegion *region = opaque;
1386 VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1393 off_t offset = vga->fd_offset + region->offset + addr;
1400 buf.word = cpu_to_le16(data);
1403 buf.dword = cpu_to_le32(data);
1406 hw_error("vfio: unsupported write size, %d bytes", size);
1410 if (pwrite(vga->fd, &buf, size, offset) != size) {
1411 error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
1412 __func__, region->offset + addr, data, size);
1415 trace_vfio_vga_write(region->offset + addr, data, size);
1418 static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
1420 VFIOVGARegion *region = opaque;
1421 VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1429 off_t offset = vga->fd_offset + region->offset + addr;
1431 if (pread(vga->fd, &buf, size, offset) != size) {
1432 error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
1433 __func__, region->offset + addr, size);
1434 return (uint64_t)-1;
1442 data = le16_to_cpu(buf.word);
1445 data = le32_to_cpu(buf.dword);
1448 hw_error("vfio: unsupported read size, %d bytes", size);
1452 trace_vfio_vga_read(region->offset + addr, size, data);
1457 static const MemoryRegionOps vfio_vga_ops = {
1458 .read = vfio_vga_read,
1459 .write = vfio_vga_write,
1460 .endianness = DEVICE_LITTLE_ENDIAN,
1464 * Device specific quirks
1467 /* Is range1 fully contained within range2? */
1468 static bool vfio_range_contained(uint64_t first1, uint64_t len1,
1469 uint64_t first2, uint64_t len2) {
1470 return (first1 >= first2 && first1 + len1 <= first2 + len2);
1473 static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
1475 return (mask && (flags & mask) == mask);
1478 static uint64_t vfio_generic_window_quirk_read(void *opaque,
1479 hwaddr addr, unsigned size)
1481 VFIOQuirk *quirk = opaque;
1482 VFIOPCIDevice *vdev = quirk->vdev;
1485 if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
1486 ranges_overlap(addr, size,
1487 quirk->data.data_offset, quirk->data.data_size)) {
1488 hwaddr offset = addr - quirk->data.data_offset;
1490 if (!vfio_range_contained(addr, size, quirk->data.data_offset,
1491 quirk->data.data_size)) {
1492 hw_error("%s: window data read not fully contained: %s",
1493 __func__, memory_region_name(&quirk->mem));
1496 data = vfio_pci_read_config(&vdev->pdev,
1497 quirk->data.address_val + offset, size);
1499 trace_vfio_generic_window_quirk_read(memory_region_name(&quirk->mem),
1500 vdev->vbasedev.name,
1504 data = vfio_region_read(&vdev->bars[quirk->data.bar].region,
1505 addr + quirk->data.base_offset, size);
1511 static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
1512 uint64_t data, unsigned size)
1514 VFIOQuirk *quirk = opaque;
1515 VFIOPCIDevice *vdev = quirk->vdev;
1517 if (ranges_overlap(addr, size,
1518 quirk->data.address_offset, quirk->data.address_size)) {
1520 if (addr != quirk->data.address_offset) {
1521 hw_error("%s: offset write into address window: %s",
1522 __func__, memory_region_name(&quirk->mem));
1525 if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
1526 quirk->data.flags |= quirk->data.write_flags |
1527 quirk->data.read_flags;
1528 quirk->data.address_val = data & quirk->data.address_mask;
1530 quirk->data.flags &= ~(quirk->data.write_flags |
1531 quirk->data.read_flags);
1535 if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
1536 ranges_overlap(addr, size,
1537 quirk->data.data_offset, quirk->data.data_size)) {
1538 hwaddr offset = addr - quirk->data.data_offset;
1540 if (!vfio_range_contained(addr, size, quirk->data.data_offset,
1541 quirk->data.data_size)) {
1542 hw_error("%s: window data write not fully contained: %s",
1543 __func__, memory_region_name(&quirk->mem));
1546 vfio_pci_write_config(&vdev->pdev,
1547 quirk->data.address_val + offset, data, size);
1548 trace_vfio_generic_window_quirk_write(memory_region_name(&quirk->mem),
1549 vdev->vbasedev.name,
1555 vfio_region_write(&vdev->bars[quirk->data.bar].region,
1556 addr + quirk->data.base_offset, data, size);
1559 static const MemoryRegionOps vfio_generic_window_quirk = {
1560 .read = vfio_generic_window_quirk_read,
1561 .write = vfio_generic_window_quirk_write,
1562 .endianness = DEVICE_LITTLE_ENDIAN,
1565 static uint64_t vfio_generic_quirk_read(void *opaque,
1566 hwaddr addr, unsigned size)
1568 VFIOQuirk *quirk = opaque;
1569 VFIOPCIDevice *vdev = quirk->vdev;
1570 hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
1571 hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
1574 if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
1575 ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
1576 if (!vfio_range_contained(addr, size, offset,
1577 quirk->data.address_mask + 1)) {
1578 hw_error("%s: read not fully contained: %s",
1579 __func__, memory_region_name(&quirk->mem));
1582 data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);
1584 trace_vfio_generic_quirk_read(memory_region_name(&quirk->mem),
1585 vdev->vbasedev.name, quirk->data.bar,
1586 addr + base, size, data);
1588 data = vfio_region_read(&vdev->bars[quirk->data.bar].region,
1595 static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
1596 uint64_t data, unsigned size)
1598 VFIOQuirk *quirk = opaque;
1599 VFIOPCIDevice *vdev = quirk->vdev;
1600 hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
1601 hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
1603 if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
1604 ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
1605 if (!vfio_range_contained(addr, size, offset,
1606 quirk->data.address_mask + 1)) {
1607 hw_error("%s: write not fully contained: %s",
1608 __func__, memory_region_name(&quirk->mem));
1611 vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);
1613 trace_vfio_generic_quirk_write(memory_region_name(&quirk->mem),
1614 vdev->vbasedev.name, quirk->data.bar,
1615 addr + base, data, size);
1617 vfio_region_write(&vdev->bars[quirk->data.bar].region,
1618 addr + base, data, size);
1622 static const MemoryRegionOps vfio_generic_quirk = {
1623 .read = vfio_generic_quirk_read,
1624 .write = vfio_generic_quirk_write,
1625 .endianness = DEVICE_LITTLE_ENDIAN,
1628 #define PCI_VENDOR_ID_ATI 0x1002
1631 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
1632 * through VGA register 0x3c3. On newer cards, the I/O port BAR is always
1633 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
1634 * those). Note that on bare metal, a read of 0x3c3 doesn't always return the
1635 * I/O port BAR address. Originally this was coded to return the virtual BAR
1636 * address only if the physical register read returns the actual BAR address,
1637 * but users have reported greater success if we return the virtual address
1638 * unconditionally.
1640 static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
1641 hwaddr addr, unsigned size)
1643 VFIOQuirk *quirk = opaque;
1644 VFIOPCIDevice *vdev = quirk->vdev;
1645 uint64_t data = vfio_pci_read_config(&vdev->pdev,
1646 PCI_BASE_ADDRESS_0 + (4 * 4) + 1,
1648 trace_vfio_ati_3c3_quirk_read(data);
1653 static const MemoryRegionOps vfio_ati_3c3_quirk = {
1654 .read = vfio_ati_3c3_quirk_read,
1655 .endianness = DEVICE_LITTLE_ENDIAN,
1658 static void vfio_vga_probe_ati_3c3_quirk(VFIOPCIDevice *vdev)
1660 PCIDevice *pdev = &vdev->pdev;
1663 if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
1668 * As long as the BAR is >= 256 bytes it will be aligned such that the
1669 * lower byte is always zero. Filter out anything else, if it exists.
1671 if (!vdev->bars[4].ioport || vdev->bars[4].region.size < 256) {
1675 quirk = g_malloc0(sizeof(*quirk));
1678 memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk,
1679 "vfio-ati-3c3-quirk", 1);
1680 memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
1681 3 /* offset 3 bytes from 0x3c0 */, &quirk->mem);
1683 QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
1686 trace_vfio_vga_probe_ati_3c3_quirk(vdev->vbasedev.name);
1690 * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI
1691 * config space through MMIO BAR2 at offset 0x4000. Nothing seems to access
1692 * the MMIO space directly, but a window to this space is provided through
1693 * I/O port BAR4. Offset 0x0 is the address register and offset 0x4 is the
1694 * data register. When the address is programmed to a range of 0x4000-0x4fff,
1695 * PCI configuration space is available. Experimentation seems to indicate
1696 * that only read-only access is provided, but we drop writes when the window
1697 * is enabled to config space nonetheless.
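 *
 * Illustrative access sequence (host address hypothetical), reading the
 * vendor/device ID dword at config offset 0x0:
 *   vfio: vfio_bar_write(0000:01:00.0:BAR4+0x0, 0x4000, 4) // select offset 0x0
 *   vfio: vfio_bar_read(0000:01:00.0:BAR4+0x4, 4) = <config dword at 0x0>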
1699 static void vfio_probe_ati_bar4_window_quirk(VFIOPCIDevice *vdev, int nr)
1701 PCIDevice *pdev = &vdev->pdev;
1704 if (!vdev->has_vga || nr != 4 ||
1705 pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
1709 quirk = g_malloc0(sizeof(*quirk));
1711 quirk->data.address_size = 4;
1712 quirk->data.data_offset = 4;
1713 quirk->data.data_size = 4;
1714 quirk->data.address_match = 0x4000;
1715 quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
1716 quirk->data.bar = nr;
1717 quirk->data.read_flags = quirk->data.write_flags = 1;
1719 memory_region_init_io(&quirk->mem, OBJECT(vdev),
1720 &vfio_generic_window_quirk, quirk,
1721 "vfio-ati-bar4-window-quirk", 8);
1722 memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
1723 quirk->data.base_offset, &quirk->mem, 1);
1725 QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
1727 trace_vfio_probe_ati_bar4_window_quirk(vdev->vbasedev.name);
1730 #define PCI_VENDOR_ID_REALTEK 0x10ec
1733 * RTL8168 devices have a backdoor that can access the MSI-X table. At BAR2
1734 * offset 0x70 there is a dword data register, offset 0x74 is a dword address
1735 * register. According to the Linux r8169 driver, the MSI-X table is addressed
1736 * when the "type" portion of the address register is set to 0x1. This appears
1737 * to be bits 16:30. Bit 31 is both a write indicator and some sort of
1738 * "address latched" indicator. Bits 12:15 are a mask field, which we can
1739 * ignore because the MSI-X table should always be accessed as a dword (full
1740 * mask). Bits 0:11 are the offset within the type.
1744 * Read from MSI-X table offset 0
1745 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x1f000, 4) // store read addr
1746 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x8001f000 // latch
1747 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x70, 4) = 0xfee00398 // read data
1749 * Write 0xfee00000 to MSI-X table offset 0
1750 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x70, 0xfee00000, 4) // write data
1751 * vfio: vfio_bar_write(0000:05:00.0:BAR2+0x74, 0x8001f000, 4) // do write
1752 * vfio: vfio_bar_read(0000:05:00.0:BAR2+0x74, 4) = 0x1f000 // complete
1755 static uint64_t vfio_rtl8168_window_quirk_read(void *opaque,
1756 hwaddr addr, unsigned size)
1758 VFIOQuirk *quirk = opaque;
1759 VFIOPCIDevice *vdev = quirk->vdev;
1762 case 4: /* address */
1763 if (quirk->data.flags) {
1764 trace_vfio_rtl8168_window_quirk_read_fake(
1765 memory_region_name(&quirk->mem),
1766 vdev->vbasedev.name);
1768 return quirk->data.address_match ^ 0x10000000U;
1772 if (quirk->data.flags) {
1775 trace_vfio_rtl8168_window_quirk_read_table(
1776 memory_region_name(&quirk->mem),
1777 vdev->vbasedev.name);
1779 if (!(vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) {
1783 io_mem_read(&vdev->pdev.msix_table_mmio,
1784 (hwaddr)(quirk->data.address_match & 0xfff),
1790 trace_vfio_rtl8168_window_quirk_read_direct(memory_region_name(&quirk->mem),
1791 vdev->vbasedev.name);
1793 return vfio_region_read(&vdev->bars[quirk->data.bar].region,
1797 static void vfio_rtl8168_window_quirk_write(void *opaque, hwaddr addr,
1798 uint64_t data, unsigned size)
1800 VFIOQuirk *quirk = opaque;
1801 VFIOPCIDevice *vdev = quirk->vdev;
1804 case 4: /* address */
1805 if ((data & 0x7fff0000) == 0x10000) {
1806 if (data & 0x10000000U &&
1807 vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) {
1809 trace_vfio_rtl8168_window_quirk_write_table(
1810 memory_region_name(&quirk->mem),
1811 vdev->vbasedev.name);
1813 io_mem_write(&vdev->pdev.msix_table_mmio,
1814 (hwaddr)(quirk->data.address_match & 0xfff),
1818 quirk->data.flags = 1;
1819 quirk->data.address_match = data;
1823 quirk->data.flags = 0;
1826 quirk->data.address_mask = data;
1830 trace_vfio_rtl8168_window_quirk_write_direct(
1831 memory_region_name(&quirk->mem),
1832 vdev->vbasedev.name);
1834 vfio_region_write(&vdev->bars[quirk->data.bar].region,
1835 addr + 0x70, data, size);
1838 static const MemoryRegionOps vfio_rtl8168_window_quirk = {
1839 .read = vfio_rtl8168_window_quirk_read,
1840 .write = vfio_rtl8168_window_quirk_write,
1842 .min_access_size = 4,
1843 .max_access_size = 4,
1846 .endianness = DEVICE_LITTLE_ENDIAN,
1849 static void vfio_probe_rtl8168_bar2_window_quirk(VFIOPCIDevice *vdev, int nr)
1851 PCIDevice *pdev = &vdev->pdev;
1854 if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_REALTEK ||
1855 pci_get_word(pdev->config + PCI_DEVICE_ID) != 0x8168 || nr != 2) {
1859 quirk = g_malloc0(sizeof(*quirk));
1861 quirk->data.bar = nr;
1863 memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_rtl8168_window_quirk,
1864 quirk, "vfio-rtl8168-window-quirk", 8);
1865 memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
1866 0x70, &quirk->mem, 1);
1868 QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
1870 trace_vfio_probe_rtl8168_bar2_window_quirk(vdev->vbasedev.name);
1873 * Trap the BAR2 MMIO window to config space as well.
1875 static void vfio_probe_ati_bar2_4000_quirk(VFIOPCIDevice *vdev, int nr)
1877 PCIDevice *pdev = &vdev->pdev;
1880 /* Only enable on newer devices where BAR2 is 64bit */
1881 if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 ||
1882 pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
1886 quirk = g_malloc0(sizeof(*quirk));
1888 quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
1889 quirk->data.address_match = 0x4000;
1890 quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
1891 quirk->data.bar = nr;
1893 memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
1894 "vfio-ati-bar2-4000-quirk",
1895 TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
1896 memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
1897 quirk->data.address_match & TARGET_PAGE_MASK,
1900 QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
1902 trace_vfio_probe_ati_bar2_4000_quirk(vdev->vbasedev.name);
1906 * Older ATI/AMD cards like the X550 have a similar window to that above.
1907 * I/O port BAR1 provides a window to a mirror of PCI config space located
1908 * in BAR2 at offset 0xf00. We don't care to support such older cards, but
1909 * note it for future reference.
1912 #define PCI_VENDOR_ID_NVIDIA 0x10de
1915 * Nvidia has several different methods to get to config space; the
1916 * nouveau project has several of these documented here:
1917 * https://github.com/pathscale/envytools/tree/master/hwdocs
1919 * The first quirk is actually not documented in envytools and is found
1920 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]). This is an
1921 * NV46 chipset. The backdoor uses the legacy VGA I/O ports to access
1922 * the mirror of PCI config space found at BAR0 offset 0x1800. The access
1923 * sequence first writes 0x338 to I/O port 0x3d4. The target offset is
1924 * then written to 0x3d0. Finally, 0x538 (for a read) or 0x738 (for a
1925 * write) is written to 0x3d4. The BAR0 offset is then accessible
1926 * through 0x3d0. This quirk doesn't seem to be necessary on newer cards
1927 * that use the I/O port BAR5 window but it doesn't hurt to leave it.
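 *
 * For example, reading the dword at config offset 0x0 through this backdoor
 * looks like (all accesses via the VGA I/O ports):
 *   write 0x338  -> 0x3d4   select the backdoor window
 *   write 0x1800 -> 0x3d0   BAR0 offset of the config mirror (+ target offset)
 *   write 0x538  -> 0x3d4   arm for read (0x738 would arm for write)
 *   read  0x3d0  = config space dword at offset 0x0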
1937 static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
1938 hwaddr addr, unsigned size)
1940 VFIOQuirk *quirk = opaque;
1941 VFIOPCIDevice *vdev = quirk->vdev;
1942 PCIDevice *pdev = &vdev->pdev;
1943 uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
1944 addr + quirk->data.base_offset, size);
1946 if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) {
1947 data = vfio_pci_read_config(pdev, quirk->data.address_val, size);
1948 trace_vfio_nvidia_3d0_quirk_read(size, data);
1951 quirk->data.flags = NV_3D0_NONE;
1956 static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
1957 uint64_t data, unsigned size)
1959 VFIOQuirk *quirk = opaque;
1960 VFIOPCIDevice *vdev = quirk->vdev;
1961 PCIDevice *pdev = &vdev->pdev;
1963 switch (quirk->data.flags) {
1965 if (addr == quirk->data.address_offset && data == 0x338) {
1966 quirk->data.flags = NV_3D0_SELECT;
1970 quirk->data.flags = NV_3D0_NONE;
1971 if (addr == quirk->data.data_offset &&
1972 (data & ~quirk->data.address_mask) == quirk->data.address_match) {
1973 quirk->data.flags = NV_3D0_WINDOW;
1974 quirk->data.address_val = data & quirk->data.address_mask;
1978 quirk->data.flags = NV_3D0_NONE;
1979 if (addr == quirk->data.address_offset) {
1980 if (data == 0x538) {
1981 quirk->data.flags = NV_3D0_READ;
1982 } else if (data == 0x738) {
1983 quirk->data.flags = NV_3D0_WRITE;
1988 quirk->data.flags = NV_3D0_NONE;
1989 if (addr == quirk->data.data_offset) {
1990 vfio_pci_write_config(pdev, quirk->data.address_val, data, size);
1991 trace_vfio_nvidia_3d0_quirk_write(data, size);
1997 vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
1998 addr + quirk->data.base_offset, data, size);
2001 static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
2002 .read = vfio_nvidia_3d0_quirk_read,
2003 .write = vfio_nvidia_3d0_quirk_write,
2004 .endianness = DEVICE_LITTLE_ENDIAN,
2007 static void vfio_vga_probe_nvidia_3d0_quirk(VFIOPCIDevice *vdev)
2009 PCIDevice *pdev = &vdev->pdev;
2012 if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
2013 !vdev->bars[1].region.size) {
2017 quirk = g_malloc0(sizeof(*quirk));
2019 quirk->data.base_offset = 0x10;
2020 quirk->data.address_offset = 4;
2021 quirk->data.address_size = 2;
2022 quirk->data.address_match = 0x1800;
2023 quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
2024 quirk->data.data_offset = 0;
2025 quirk->data.data_size = 4;
2027 memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk,
2028 quirk, "vfio-nvidia-3d0-quirk", 6);
2029 memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
2030 quirk->data.base_offset, &quirk->mem);
2032 QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
2035 trace_vfio_vga_probe_nvidia_3d0_quirk(vdev->vbasedev.name);
2039 * The second quirk is documented in envytools. The I/O port BAR5 is just
2040 * a set of address/data ports to the MMIO BARs. The BAR we care about is
2041 * again BAR0. This backdoor is apparently a bit newer than the one above,
2042 * so we need to trap not only the 256 bytes @0x1800, but all of PCI config
2043 * space, including extended space, which is available at the 4k @0x88000.
2046 NV_BAR5_ADDRESS = 0x1,
2047 NV_BAR5_ENABLE = 0x2,
2048 NV_BAR5_MASTER = 0x4,
2049 NV_BAR5_VALID = 0x7,
2052 static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr,
2053 uint64_t data, unsigned size)
2055 VFIOQuirk *quirk = opaque;
2060 quirk->data.flags |= NV_BAR5_MASTER;
2062 quirk->data.flags &= ~NV_BAR5_MASTER;
2067 quirk->data.flags |= NV_BAR5_ENABLE;
2069 quirk->data.flags &= ~NV_BAR5_ENABLE;
2073 if (quirk->data.flags & NV_BAR5_MASTER) {
2074 if ((data & ~0xfff) == 0x88000) {
2075 quirk->data.flags |= NV_BAR5_ADDRESS;
2076 quirk->data.address_val = data & 0xfff;
2077 } else if ((data & ~0xff) == 0x1800) {
2078 quirk->data.flags |= NV_BAR5_ADDRESS;
2079 quirk->data.address_val = data & 0xff;
2081 quirk->data.flags &= ~NV_BAR5_ADDRESS;
2087 vfio_generic_window_quirk_write(opaque, addr, data, size);
2090 static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = {
2091 .read = vfio_generic_window_quirk_read,
2092 .write = vfio_nvidia_bar5_window_quirk_write,
2093 .valid.min_access_size = 4,
2094 .endianness = DEVICE_LITTLE_ENDIAN,
2097 static void vfio_probe_nvidia_bar5_window_quirk(VFIOPCIDevice *vdev, int nr)
2099 PCIDevice *pdev = &vdev->pdev;
2102 if (!vdev->has_vga || nr != 5 ||
2103 pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
2107 quirk = g_malloc0(sizeof(*quirk));
2109 quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID;
2110 quirk->data.address_offset = 0x8;
2111 quirk->data.address_size = 0; /* actually 4, but avoids generic code */
2112 quirk->data.data_offset = 0xc;
2113 quirk->data.data_size = 4;
2114 quirk->data.bar = nr;
2116 memory_region_init_io(&quirk->mem, OBJECT(vdev),
2117 &vfio_nvidia_bar5_window_quirk, quirk,
2118 "vfio-nvidia-bar5-window-quirk", 16);
2119 memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
2122 QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
2124 trace_vfio_probe_nvidia_bar5_window_quirk(vdev->vbasedev.name);
2127 static void vfio_nvidia_88000_quirk_write(void *opaque, hwaddr addr,
2128 uint64_t data, unsigned size)
2130 VFIOQuirk *quirk = opaque;
2131 VFIOPCIDevice *vdev = quirk->vdev;
2132 PCIDevice *pdev = &vdev->pdev;
2133 hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
2135 vfio_generic_quirk_write(opaque, addr, data, size);
2138 * Nvidia seems to acknowledge MSI interrupts by writing 0xff to the
2139 * MSI capability ID register. Both the ID and next register are
2140 * read-only, so we allow writes covering either of those to real hw.
2141 * NB - only fixed for the 0x88000 MMIO window.
2143 if ((pdev->cap_present & QEMU_PCI_CAP_MSI) &&
2144 vfio_range_contained(addr, size, pdev->msi_cap, PCI_MSI_FLAGS)) {
2145 vfio_region_write(&vdev->bars[quirk->data.bar].region,
2146 addr + base, data, size);
2150 static const MemoryRegionOps vfio_nvidia_88000_quirk = {
2151 .read = vfio_generic_quirk_read,
2152 .write = vfio_nvidia_88000_quirk_write,
2153 .endianness = DEVICE_LITTLE_ENDIAN,
2157 * Finally, BAR0 itself. We want to redirect any accesses to either
2158 * 0x1800 or 0x88000 through the PCI config space access functions.
2160 * NB - quirk at a page granularity or else they don't seem to work when
2163 * Here's offset 0x88000...
2165 static void vfio_probe_nvidia_bar0_88000_quirk(VFIOPCIDevice *vdev, int nr)
2167 PCIDevice *pdev = &vdev->pdev;
2169 uint16_t vendor, class;
2171 vendor = pci_get_word(pdev->config + PCI_VENDOR_ID);
2172 class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);
2174 if (nr != 0 || vendor != PCI_VENDOR_ID_NVIDIA ||
2175 class != PCI_CLASS_DISPLAY_VGA) {
2179 quirk = g_malloc0(sizeof(*quirk));
2181 quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
2182 quirk->data.address_match = 0x88000;
2183 quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
2184 quirk->data.bar = nr;
2186 memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_88000_quirk,
2187 quirk, "vfio-nvidia-bar0-88000-quirk",
2188 TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
2189 memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
2190 quirk->data.address_match & TARGET_PAGE_MASK,
2193 QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
2195 trace_vfio_probe_nvidia_bar0_88000_quirk(vdev->vbasedev.name);
2199 * And here's the same for BAR0 offset 0x1800...
2201 static void vfio_probe_nvidia_bar0_1800_quirk(VFIOPCIDevice *vdev, int nr)
2203 PCIDevice *pdev = &vdev->pdev;
2206 if (!vdev->has_vga || nr != 0 ||
2207 pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
2211 /* Log the chipset ID */
2212 trace_vfio_probe_nvidia_bar0_1800_quirk_id(
2213 (unsigned int)(vfio_region_read(&vdev->bars[0].region, 0, 4) >> 20)
2216 quirk = g_malloc0(sizeof(*quirk));
2218 quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
2219 quirk->data.address_match = 0x1800;
2220 quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
2221 quirk->data.bar = nr;
2223 memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
2224 "vfio-nvidia-bar0-1800-quirk",
2225 TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
2226 memory_region_add_subregion_overlap(&vdev->bars[nr].region.mem,
2227 quirk->data.address_match & TARGET_PAGE_MASK,
2230 QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
2232 trace_vfio_probe_nvidia_bar0_1800_quirk(vdev->vbasedev.name);
2236 * TODO - Some Nvidia devices provide config access to their companion HDA
2237 * device and even to their parent bridge via these config space mirrors.
2238 * Add quirks for those regions.
2242 * Common quirk probe entry points.
2244 static void vfio_vga_quirk_setup(VFIOPCIDevice *vdev)
2246 vfio_vga_probe_ati_3c3_quirk(vdev);
2247 vfio_vga_probe_nvidia_3d0_quirk(vdev);
2250 static void vfio_vga_quirk_teardown(VFIOPCIDevice *vdev)
2254 for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
2255 while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
2256 VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
2257 memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem);
2258 object_unparent(OBJECT(&quirk->mem));
2259 QLIST_REMOVE(quirk, next);
2265 static void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
2267 vfio_probe_ati_bar4_window_quirk(vdev, nr);
2268 vfio_probe_ati_bar2_4000_quirk(vdev, nr);
2269 vfio_probe_nvidia_bar5_window_quirk(vdev, nr);
2270 vfio_probe_nvidia_bar0_88000_quirk(vdev, nr);
2271 vfio_probe_nvidia_bar0_1800_quirk(vdev, nr);
2272 vfio_probe_rtl8168_bar2_window_quirk(vdev, nr);
2275 static void vfio_bar_quirk_teardown(VFIOPCIDevice *vdev, int nr)
2277 VFIOBAR *bar = &vdev->bars[nr];
2279 while (!QLIST_EMPTY(&bar->quirks)) {
2280 VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
2281 memory_region_del_subregion(&bar->region.mem, &quirk->mem);
2282 object_unparent(OBJECT(&quirk->mem));
2283 QLIST_REMOVE(quirk, next);
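/*
 * Note (added for clarity, not in the original): each quirk above is a small
 * MemoryRegion layered over part of a BAR or VGA range with
 * memory_region_add_subregion_overlap(), so guest accesses that hit the
 * window are intercepted and redirected (typically into emulated PCI config
 * space) while everything else falls through to the device mapping below it.
 */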
2291 static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
2293 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2294 uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
2296 memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
2297 emu_bits = le32_to_cpu(emu_bits);
2300 emu_val = pci_default_read_config(pdev, addr, len);
2303 if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
2306 ret = pread(vdev->vbasedev.fd, &phys_val, len,
2307 vdev->config_offset + addr);
2309 error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
2310 __func__, vdev->host.domain, vdev->host.bus,
2311 vdev->host.slot, vdev->host.function, addr, len);
2314 phys_val = le32_to_cpu(phys_val);
2317 val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
2319 trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);
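/*
 * Note (added): the merge above serves each bit either from QEMU's emulated
 * config space or from the physical device, selected by emulated_config_bits.
 * For example, PCI_ROM_ADDRESS is marked emulated at init time, so a dword
 * read of the ROM BAR returns QEMU's value, while an unmarked register such
 * as PCI_VENDOR_ID is read from the device through the config region fd.
 */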
2324 static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
2325 uint32_t val, int len)
2327 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2328 uint32_t val_le = cpu_to_le32(val);
2330 trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);
2332 /* Write everything to VFIO, let it filter out what we can't write */
2333 if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
2335 error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
2336 __func__, vdev->host.domain, vdev->host.bus,
2337 vdev->host.slot, vdev->host.function, addr, val, len);
2340 /* MSI/MSI-X Enabling/Disabling */
2341 if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
2342 ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
2343 int is_enabled, was_enabled = msi_enabled(pdev);
2345 pci_default_write_config(pdev, addr, val, len);
2347 is_enabled = msi_enabled(pdev);
2351 vfio_enable_msi(vdev);
2355 vfio_disable_msi(vdev);
2357 vfio_update_msi(vdev);
2360 } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
2361 ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
2362 int is_enabled, was_enabled = msix_enabled(pdev);
2364 pci_default_write_config(pdev, addr, val, len);
2366 is_enabled = msix_enabled(pdev);
2368 if (!was_enabled && is_enabled) {
2369 vfio_enable_msix(vdev);
2370 } else if (was_enabled && !is_enabled) {
2371 vfio_disable_msix(vdev);
2374 /* Write everything to QEMU to keep emulated bits correct */
2375 pci_default_write_config(pdev, addr, val, len);
2380 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
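/*
 * Illustrative sketch (added, not part of the original code): a type1 mapping
 * is a single ioctl on the container fd, e.g. to make a host buffer visible
 * to the device at a chosen IOVA (host_buf, guest_iova and buf_size below are
 * placeholders):
 *
 *   struct vfio_iommu_type1_dma_map map = {
 *       .argsz = sizeof(map),
 *       .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *       .vaddr = (__u64)(uintptr_t)host_buf,   // process virtual address
 *       .iova  = guest_iova,                   // address seen by the device
 *       .size  = buf_size,                     // page-aligned length
 *   };
 *   if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map)) {
 *       // -errno describes the failure
 *   }
 *
 * vfio_dma_map()/vfio_dma_unmap() below wrap exactly this for the memory
 * listener and the guest IOMMU notifier.
 */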
2382 static int vfio_dma_unmap(VFIOContainer *container,
2383 hwaddr iova, ram_addr_t size)
2385 struct vfio_iommu_type1_dma_unmap unmap = {
2386 .argsz = sizeof(unmap),
2392 if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
2393 error_report("VFIO_UNMAP_DMA: %d\n", -errno);
2400 static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
2401 ram_addr_t size, void *vaddr, bool readonly)
2403 struct vfio_iommu_type1_dma_map map = {
2404 .argsz = sizeof(map),
2405 .flags = VFIO_DMA_MAP_FLAG_READ,
2406 .vaddr = (__u64)(uintptr_t)vaddr,
2412 map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
2416 * Try the mapping, if it fails with EBUSY, unmap the region and try
2417 * again. This shouldn't be necessary, but we sometimes see it in
2418 * the VGA ROM space.
2420 if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
2421 (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
2422 ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
2426 error_report("VFIO_MAP_DMA: %d\n", -errno);
2430 static bool vfio_listener_skipped_section(MemoryRegionSection *section)
2432 return (!memory_region_is_ram(section->mr) &&
2433 !memory_region_is_iommu(section->mr)) ||
2435 * Sizing an enabled 64-bit BAR can cause spurious mappings to
2436 * addresses in the upper part of the 64-bit address space. These
2437 * are never accessed by the CPU and beyond the address width of
2438 * some IOMMU hardware. TODO: VFIO should tell us the IOMMU width.
2440 section->offset_within_address_space & (1ULL << 63);
2443 static void vfio_iommu_map_notify(Notifier *n, void *data)
2445 VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
2446 VFIOContainer *container = giommu->container;
2447 IOMMUTLBEntry *iotlb = data;
2450 hwaddr len = iotlb->addr_mask + 1;
2454 trace_vfio_iommu_map_notify(iotlb->iova,
2455 iotlb->iova + iotlb->addr_mask);
2458 * The IOMMU TLB entry we have just covers translation through
2459 * this IOMMU to its immediate target. We need to translate
2460 * it the rest of the way through to memory.
2462 mr = address_space_translate(&address_space_memory,
2463 iotlb->translated_addr,
2464 &xlat, &len, iotlb->perm & IOMMU_WO);
2465 if (!memory_region_is_ram(mr)) {
2466 error_report("iommu map to non memory area %"HWADDR_PRIx"\n",
2471 * Translation truncates length to the IOMMU page size,
2472 * check that it did not truncate too much.
2474 if (len & iotlb->addr_mask) {
2475 error_report("iommu has granularity incompatible with target AS\n");
2479 if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
2480 vaddr = memory_region_get_ram_ptr(mr) + xlat;
2482 ret = vfio_dma_map(container, iotlb->iova,
2483 iotlb->addr_mask + 1, vaddr,
2484 !(iotlb->perm & IOMMU_WO) || mr->readonly);
2486 error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
2487 "0x%"HWADDR_PRIx", %p) = %d (%m)",
2488 container, iotlb->iova,
2489 iotlb->addr_mask + 1, vaddr, ret);
2492 ret = vfio_dma_unmap(container, iotlb->iova, iotlb->addr_mask + 1);
2494 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
2495 "0x%"HWADDR_PRIx") = %d (%m)",
2496 container, iotlb->iova,
2497 iotlb->addr_mask + 1, ret);
2502 static void vfio_listener_region_add(MemoryListener *listener,
2503 MemoryRegionSection *section)
2505 VFIOContainer *container = container_of(listener, VFIOContainer,
2506 iommu_data.type1.listener);
2512 if (vfio_listener_skipped_section(section)) {
2513 trace_vfio_listener_region_add_skip(
2514 section->offset_within_address_space,
2515 section->offset_within_address_space +
2516 int128_get64(int128_sub(section->size, int128_one())));
2520 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
2521 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
2522 error_report("%s received unaligned region", __func__);
2526 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
2527 llend = int128_make64(section->offset_within_address_space);
2528 llend = int128_add(llend, section->size);
2529 llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
2531 if (int128_ge(int128_make64(iova), llend)) {
2535 memory_region_ref(section->mr);
2537 if (memory_region_is_iommu(section->mr)) {
2538 VFIOGuestIOMMU *giommu;
2540 trace_vfio_listener_region_add_iommu(iova,
2541 int128_get64(int128_sub(llend, int128_one())));
2543 * FIXME: We should do some checking to see if the
2544 * capabilities of the host VFIO IOMMU are adequate to model
2547 * FIXME: For VFIO iommu types which have KVM acceleration to
2548 * avoid bouncing all map/unmaps through qemu this way, this
2549 * would be the right place to wire that up (tell the KVM
2550 * device emulation the VFIO iommu handles to use).
2553 * This assumes that the guest IOMMU is empty of
2554 * mappings at this point.
2556 * One way of doing this is:
2557 * 1. Avoid sharing IOMMUs between emulated devices or different
2559 * 2. Implement VFIO_IOMMU_ENABLE in the host kernel to fail if
2560 * there are some mappings in IOMMU.
2562 * VFIO on SPAPR does that. Other IOMMU models may do it differently;
2563 * they must make sure there are no existing mappings or
2564 * loop through existing mappings to map them into VFIO.
2566 giommu = g_malloc0(sizeof(*giommu));
2567 giommu->iommu = section->mr;
2568 giommu->container = container;
2569 giommu->n.notify = vfio_iommu_map_notify;
2570 QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
2571 memory_region_register_iommu_notifier(giommu->iommu, &giommu->n);
2576 /* Here we assume that memory_region_is_ram(section->mr)==true */
2578 end = int128_get64(llend);
2579 vaddr = memory_region_get_ram_ptr(section->mr) +
2580 section->offset_within_region +
2581 (iova - section->offset_within_address_space);
2583 trace_vfio_listener_region_add_ram(iova, end - 1, vaddr);
2585 ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
2587 error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
2588 "0x%"HWADDR_PRIx", %p) = %d (%m)",
2589 container, iova, end - iova, vaddr, ret);
2592 * On the initfn path, store the first error in the container so we
2593 * can gracefully fail. At runtime, there's not much we can do other
2594 * than throw a hardware error.
2596 if (!container->iommu_data.type1.initialized) {
2597 if (!container->iommu_data.type1.error) {
2598 container->iommu_data.type1.error = ret;
2601 hw_error("vfio: DMA mapping failed, unable to continue");
2606 static void vfio_listener_region_del(MemoryListener *listener,
2607 MemoryRegionSection *section)
2609 VFIOContainer *container = container_of(listener, VFIOContainer,
2610 iommu_data.type1.listener);
2614 if (vfio_listener_skipped_section(section)) {
2615 trace_vfio_listener_region_del_skip(
2616 section->offset_within_address_space,
2617 section->offset_within_address_space +
2618 int128_get64(int128_sub(section->size, int128_one())));
2622 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
2623 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
2624 error_report("%s received unaligned region", __func__);
2628 if (memory_region_is_iommu(section->mr)) {
2629 VFIOGuestIOMMU *giommu;
2631 QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
2632 if (giommu->iommu == section->mr) {
2633 memory_region_unregister_iommu_notifier(&giommu->n);
2634 QLIST_REMOVE(giommu, giommu_next);
2641 * FIXME: We assume the one big unmap below is adequate to
2642 * remove any individual page mappings in the IOMMU which
2643 * might have been copied into VFIO. This works for a page table
2644 * based IOMMU where a big unmap flattens a large range of IO-PTEs.
2645 * That may not be true for all IOMMU types.
2649 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
2650 end = (section->offset_within_address_space + int128_get64(section->size)) &
2657 trace_vfio_listener_region_del(iova, end - 1);
2659 ret = vfio_dma_unmap(container, iova, end - iova);
2660 memory_region_unref(section->mr);
2662 error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
2663 "0x%"HWADDR_PRIx") = %d (%m)",
2664 container, iova, end - iova, ret);
2668 static MemoryListener vfio_memory_listener = {
2669 .region_add = vfio_listener_region_add,
2670 .region_del = vfio_listener_region_del,
2673 static void vfio_listener_release(VFIOContainer *container)
2675 memory_listener_unregister(&container->iommu_data.type1.listener);
2681 static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
2683 switch (vdev->interrupt) {
2685 vfio_disable_intx(vdev);
2688 vfio_disable_msi(vdev);
2691 vfio_disable_msix(vdev);
2696 static int vfio_setup_msi(VFIOPCIDevice *vdev, int pos)
2699 bool msi_64bit, msi_maskbit;
2702 if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
2703 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
2706 ctrl = le16_to_cpu(ctrl);
2708 msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
2709 msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
2710 entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
2712 trace_vfio_setup_msi(vdev->vbasedev.name, pos);
2714 ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
2716 if (ret == -ENOTSUP) {
2719 error_report("vfio: msi_init failed");
2722 vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
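/*
 * Capability size note (added): the arithmetic above matches the fixed MSI
 * capability layouts from the PCI spec: 0xa bytes for the basic 32-bit
 * capability, plus 0x4 when the message address is 64-bit, plus 0xa (padding,
 * mask bits and pending bits) when per-vector masking is present.
 */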
2728 * We don't have any control over how pci_add_capability() inserts
2729 * capabilities into the chain. In order to set up MSI-X we need a
2730 * MemoryRegion for the BAR. In order to set up the BAR and not
2731 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
2732 * need to first look for where the MSI-X table lives. So we
2733 * unfortunately split MSI-X setup across two functions.
2735 static int vfio_early_setup_msix(VFIOPCIDevice *vdev)
2739 uint32_t table, pba;
2740 int fd = vdev->vbasedev.fd;
2742 pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
2747 if (pread(fd, &ctrl, sizeof(ctrl),
2748 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
2752 if (pread(fd, &table, sizeof(table),
2753 vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
2757 if (pread(fd, &pba, sizeof(pba),
2758 vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
2762 ctrl = le16_to_cpu(ctrl);
2763 table = le32_to_cpu(table);
2764 pba = le32_to_cpu(pba);
2766 vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
2767 vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
2768 vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
2769 vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
2770 vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
2771 vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
2773 trace_vfio_early_setup_msix(vdev->vbasedev.name, pos,
2774 vdev->msix->table_bar,
2775 vdev->msix->table_offset,
2776 vdev->msix->entries);
2781 static int vfio_setup_msix(VFIOPCIDevice *vdev, int pos)
2785 ret = msix_init(&vdev->pdev, vdev->msix->entries,
2786 &vdev->bars[vdev->msix->table_bar].region.mem,
2787 vdev->msix->table_bar, vdev->msix->table_offset,
2788 &vdev->bars[vdev->msix->pba_bar].region.mem,
2789 vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
2791 if (ret == -ENOTSUP) {
2794 error_report("vfio: msix_init failed");
2801 static void vfio_teardown_msi(VFIOPCIDevice *vdev)
2803 msi_uninit(&vdev->pdev);
2806 msix_uninit(&vdev->pdev,
2807 &vdev->bars[vdev->msix->table_bar].region.mem,
2808 &vdev->bars[vdev->msix->pba_bar].region.mem);
2815 static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
2819 for (i = 0; i < PCI_ROM_SLOT; i++) {
2820 VFIOBAR *bar = &vdev->bars[i];
2822 if (!bar->region.size) {
2826 memory_region_set_enabled(&bar->region.mmap_mem, enabled);
2827 if (vdev->msix && vdev->msix->table_bar == i) {
2828 memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
2833 static void vfio_unmap_bar(VFIOPCIDevice *vdev, int nr)
2835 VFIOBAR *bar = &vdev->bars[nr];
2837 if (!bar->region.size) {
2841 vfio_bar_quirk_teardown(vdev, nr);
2843 memory_region_del_subregion(&bar->region.mem, &bar->region.mmap_mem);
2844 munmap(bar->region.mmap, memory_region_size(&bar->region.mmap_mem));
2846 if (vdev->msix && vdev->msix->table_bar == nr) {
2847 memory_region_del_subregion(&bar->region.mem, &vdev->msix->mmap_mem);
2848 munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
2852 static int vfio_mmap_region(Object *obj, VFIORegion *region,
2853 MemoryRegion *mem, MemoryRegion *submem,
2854 void **map, size_t size, off_t offset,
2858 VFIODevice *vbasedev = region->vbasedev;
2860 if (VFIO_ALLOW_MMAP && size && region->flags &
2861 VFIO_REGION_INFO_FLAG_MMAP) {
2864 if (region->flags & VFIO_REGION_INFO_FLAG_READ) {
2868 if (region->flags & VFIO_REGION_INFO_FLAG_WRITE) {
2872 *map = mmap(NULL, size, prot, MAP_SHARED,
2873 vbasedev->fd, region->fd_offset + offset);
2874 if (*map == MAP_FAILED) {
2880 memory_region_init_ram_ptr(submem, obj, name, size, *map);
2881 memory_region_set_skip_dump(submem);
2884 /* Create a zero sized sub-region to make cleanup easy. */
2885 memory_region_init(submem, obj, name, 0);
2888 memory_region_add_subregion(mem, offset, submem);
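/*
 * Layering note (added): every BAR therefore ends up with a "slow"
 * MemoryRegion backed by vfio_region_ops (pread/pwrite on the region fd)
 * and, when the kernel allows it, an mmap'd RAM subregion layered on top for
 * direct access.  vfio_mmap_set_enabled() above toggles only the mmap'd
 * layer, so accesses can temporarily be forced through the trapping path.
 */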
2893 static void vfio_map_bar(VFIOPCIDevice *vdev, int nr)
2895 VFIOBAR *bar = &vdev->bars[nr];
2896 unsigned size = bar->region.size;
2902 /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
2907 snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
2908 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2909 vdev->host.function, nr);
2911 /* Determine what type of BAR this is for registration */
2912 ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
2913 vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
2914 if (ret != sizeof(pci_bar)) {
2915 error_report("vfio: Failed to read BAR %d (%m)", nr);
2919 pci_bar = le32_to_cpu(pci_bar);
2920 bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
2921 bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
2922 type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
2923 ~PCI_BASE_ADDRESS_MEM_MASK);
2925 /* A "slow" read/write mapping underlies all BARs */
2926 memory_region_init_io(&bar->region.mem, OBJECT(vdev), &vfio_region_ops,
2928 pci_register_bar(&vdev->pdev, nr, type, &bar->region.mem);
2931 * We can't mmap areas overlapping the MSIX vector table, so we
2932 * potentially insert a direct-mapped subregion before and after it.
2934 if (vdev->msix && vdev->msix->table_bar == nr) {
2935 size = vdev->msix->table_offset & qemu_host_page_mask;
2938 strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
2939 if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
2940 &bar->region.mmap_mem, &bar->region.mmap,
2942 error_report("%s unsupported. Performance may be slow", name);
2945 if (vdev->msix && vdev->msix->table_bar == nr) {
2948 start = HOST_PAGE_ALIGN(vdev->msix->table_offset +
2949 (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
2951 size = start < bar->region.size ? bar->region.size - start : 0;
2952 strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
2953 /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
2954 if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
2955 &vdev->msix->mmap_mem,
2956 &vdev->msix->mmap, size, start, name)) {
2957 error_report("%s unsupported. Performance may be slow", name);
2961 vfio_bar_quirk_setup(vdev, nr);
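/*
 * Worked example (added, hypothetical numbers): for a 16KB BAR whose MSI-X
 * table starts at offset 0x2000 with 8 entries, the first mmap covers
 * [0x0, 0x2000) and the second starts at HOST_PAGE_ALIGN(0x2000 + 8 * 16) =
 * 0x3000 on 4KB pages, covering [0x3000, 0x4000).  The page holding the
 * vector table is left to the slow read/write path, as required by the
 * kernel's refusal to mmap the table area.
 */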
2964 static void vfio_map_bars(VFIOPCIDevice *vdev)
2968 for (i = 0; i < PCI_ROM_SLOT; i++) {
2969 vfio_map_bar(vdev, i);
2972 if (vdev->has_vga) {
2973 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
2974 OBJECT(vdev), &vfio_vga_ops,
2975 &vdev->vga.region[QEMU_PCI_VGA_MEM],
2976 "vfio-vga-mmio@0xa0000",
2977 QEMU_PCI_VGA_MEM_SIZE);
2978 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
2979 OBJECT(vdev), &vfio_vga_ops,
2980 &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
2981 "vfio-vga-io@0x3b0",
2982 QEMU_PCI_VGA_IO_LO_SIZE);
2983 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
2984 OBJECT(vdev), &vfio_vga_ops,
2985 &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
2986 "vfio-vga-io@0x3c0",
2987 QEMU_PCI_VGA_IO_HI_SIZE);
2989 pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
2990 &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
2991 &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
2992 vfio_vga_quirk_setup(vdev);
2996 static void vfio_unmap_bars(VFIOPCIDevice *vdev)
3000 for (i = 0; i < PCI_ROM_SLOT; i++) {
3001 vfio_unmap_bar(vdev, i);
3004 if (vdev->has_vga) {
3005 vfio_vga_quirk_teardown(vdev);
3006 pci_unregister_vga(&vdev->pdev);
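/*
 * Config space capability setup follows (note added).  vfio_std_cap_max_size()
 * infers an upper bound on a standard capability's size from the offset of
 * the next capability in the list (or the end of standard config space),
 * since devices don't report capability lengths directly.
 */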
3013 static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
3015 uint8_t tmp, next = 0xff;
3017 for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
3018 tmp = pdev->config[tmp + 1]) {
3019 if (tmp > pos && tmp < next) {
3027 static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
3029 pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
3032 static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
3033 uint16_t val, uint16_t mask)
3035 vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
3036 vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
3037 vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
3040 static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
3042 pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
3045 static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
3046 uint32_t val, uint32_t mask)
3048 vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
3049 vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
3050 vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
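/*
 * Note (added): vfio_add_emulated_word()/long() install an emulated value for
 * the masked bits: QEMU's config copy holds the value, the corresponding
 * wmask bits are cleared so the guest can't write them, and
 * emulated_config_bits is set so vfio_pci_read_config() serves them from
 * emulation rather than from the device.  vfio_setup_pcie_cap() below uses
 * this to rewrite fields of the express capability.
 */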
3053 static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size)
3058 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
3059 type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
3061 if (type != PCI_EXP_TYPE_ENDPOINT &&
3062 type != PCI_EXP_TYPE_LEG_END &&
3063 type != PCI_EXP_TYPE_RC_END) {
3065 error_report("vfio: Assignment of PCIe type 0x%x "
3066 "devices is not currently supported", type);
3070 if (!pci_bus_is_express(vdev->pdev.bus)) {
3072 * Use express capability as-is on PCI bus. It doesn't make much
3073 * sense to even expose, but some drivers (ex. tg3) depend on it
3074 * and guests don't seem to be particular about it. We'll need
3075 * to revisit this or force express devices to express buses if we
3076 * ever expose an IOMMU to the guest.
3078 } else if (pci_bus_is_root(vdev->pdev.bus)) {
3080 * On a Root Complex bus Endpoints become Root Complex Integrated
3081 * Endpoints, which changes the type and clears the LNK & LNK2 fields.
3083 if (type == PCI_EXP_TYPE_ENDPOINT) {
3084 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
3085 PCI_EXP_TYPE_RC_END << 4,
3086 PCI_EXP_FLAGS_TYPE);
3088 /* Link Capabilities, Status, and Control go away */
3089 if (size > PCI_EXP_LNKCTL) {
3090 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
3091 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
3092 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
3094 #ifndef PCI_EXP_LNKCAP2
3095 #define PCI_EXP_LNKCAP2 44
3097 #ifndef PCI_EXP_LNKSTA2
3098 #define PCI_EXP_LNKSTA2 50
3100 /* Link 2 Capabilities, Status, and Control go away */
3101 if (size > PCI_EXP_LNKCAP2) {
3102 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
3103 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
3104 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
3108 } else if (type == PCI_EXP_TYPE_LEG_END) {
3110 * Legacy endpoints don't belong on the root complex. Windows
3111 * seems to be happier with devices if we skip the capability.
3118 * Convert Root Complex Integrated Endpoints to regular endpoints.
3119 * These devices don't support LNK/LNK2 capabilities, so make them up.
3121 if (type == PCI_EXP_TYPE_RC_END) {
3122 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
3123 PCI_EXP_TYPE_ENDPOINT << 4,
3124 PCI_EXP_FLAGS_TYPE);
3125 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
3126 PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
3127 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
3130 /* Mark the Link Status bits as emulated to allow virtual negotiation */
3131 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
3132 pci_get_word(vdev->pdev.config + pos +
3134 PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
3137 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
3139 vdev->pdev.exp.exp_cap = pos;
3145 static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
3147 uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
3149 if (cap & PCI_EXP_DEVCAP_FLR) {
3150 trace_vfio_check_pcie_flr(vdev->vbasedev.name);
3151 vdev->has_flr = true;
3155 static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
3157 uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
3159 if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
3160 trace_vfio_check_pm_reset(vdev->vbasedev.name);
3161 vdev->has_pm_reset = true;
3165 static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
3167 uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
3169 if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
3170 trace_vfio_check_af_flr(vdev->vbasedev.name);
3171 vdev->has_flr = true;
3175 static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
3177 PCIDevice *pdev = &vdev->pdev;
3178 uint8_t cap_id, next, size;
3181 cap_id = pdev->config[pos];
3182 next = pdev->config[pos + 1];
3185 * If it becomes important to configure capabilities to their actual
3186 * size, use this as the default when it's something we don't recognize.
3187 * Since QEMU doesn't actually handle many of the config accesses,
3188 * exact size doesn't seem worthwhile.
3190 size = vfio_std_cap_max_size(pdev, pos);
3193 * pci_add_capability always inserts the new capability at the head
3194 * of the chain. Therefore to end up with a chain that matches the
3195 * physical device, we insert from the end by making this recursive.
3196 * This is also why we pre-calculate size above as cached config space
3197 * will be changed as we unwind the stack.
3200 ret = vfio_add_std_cap(vdev, next);
3205 /* Begin the rebuild, use QEMU emulated list bits */
3206 pdev->config[PCI_CAPABILITY_LIST] = 0;
3207 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
3208 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
3211 /* Use emulated next pointer to allow dropping caps */
3212 pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);
3215 case PCI_CAP_ID_MSI:
3216 ret = vfio_setup_msi(vdev, pos);
3218 case PCI_CAP_ID_EXP:
3219 vfio_check_pcie_flr(vdev, pos);
3220 ret = vfio_setup_pcie_cap(vdev, pos, size);
3222 case PCI_CAP_ID_MSIX:
3223 ret = vfio_setup_msix(vdev, pos);
3226 vfio_check_pm_reset(vdev, pos);
3228 ret = pci_add_capability(pdev, cap_id, pos, size);
3231 vfio_check_af_flr(vdev, pos);
3232 ret = pci_add_capability(pdev, cap_id, pos, size);
3235 ret = pci_add_capability(pdev, cap_id, pos, size);
3240 error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
3241 "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
3242 vdev->host.bus, vdev->host.slot, vdev->host.function,
3243 cap_id, size, pos, ret);
3250 static int vfio_add_capabilities(VFIOPCIDevice *vdev)
3252 PCIDevice *pdev = &vdev->pdev;
3254 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
3255 !pdev->config[PCI_CAPABILITY_LIST]) {
3256 return 0; /* Nothing to add */
3259 return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
3262 static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
3264 PCIDevice *pdev = &vdev->pdev;
3267 vfio_disable_interrupts(vdev);
3269 /* Make sure the device is in D0 */
3274 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
3275 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
3277 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3278 vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
3279 /* vfio handles the necessary delay here */
3280 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
3281 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
3283 error_report("vfio: Unable to power on device, stuck in D%d",
3290 * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
3291 * Also put INTx Disable in a known state.
3293 cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
3294 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
3295 PCI_COMMAND_INTX_DISABLE);
3296 vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
3299 static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
3301 vfio_enable_intx(vdev);
3304 static bool vfio_pci_host_match(PCIHostDeviceAddress *host1,
3305 PCIHostDeviceAddress *host2)
3307 return (host1->domain == host2->domain && host1->bus == host2->bus &&
3308 host1->slot == host2->slot && host1->function == host2->function);
3311 static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
3314 struct vfio_pci_hot_reset_info *info;
3315 struct vfio_pci_dependent_device *devices;
3316 struct vfio_pci_hot_reset *reset;
3321 trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
3323 vfio_pci_pre_reset(vdev);
3324 vdev->vbasedev.needs_reset = false;
3326 info = g_malloc0(sizeof(*info));
3327 info->argsz = sizeof(*info);
3329 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
3330 if (ret && errno != ENOSPC) {
3332 if (!vdev->has_pm_reset) {
3333 error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, "
3334 "no available reset mechanism.", vdev->host.domain,
3335 vdev->host.bus, vdev->host.slot, vdev->host.function);
3340 count = info->count;
3341 info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
3342 info->argsz = sizeof(*info) + (count * sizeof(*devices));
3343 devices = &info->devices[0];
3345 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
3348 error_report("vfio: hot reset info failed: %m");
3352 trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
3354 /* Verify that we have all the groups required */
3355 for (i = 0; i < info->count; i++) {
3356 PCIHostDeviceAddress host;
3358 VFIODevice *vbasedev_iter;
3360 host.domain = devices[i].segment;
3361 host.bus = devices[i].bus;
3362 host.slot = PCI_SLOT(devices[i].devfn);
3363 host.function = PCI_FUNC(devices[i].devfn);
3365 trace_vfio_pci_hot_reset_dep_devices(host.domain,
3366 host.bus, host.slot, host.function, devices[i].group_id);
3368 if (vfio_pci_host_match(&host, &vdev->host)) {
3372 QLIST_FOREACH(group, &vfio_group_list, next) {
3373 if (group->groupid == devices[i].group_id) {
3379 if (!vdev->has_pm_reset) {
3380 error_report("vfio: Cannot reset device %s, "
3381 "depends on group %d which is not owned.",
3382 vdev->vbasedev.name, devices[i].group_id);
3388 /* Prep dependent devices for reset and clear our marker. */
3389 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
3390 if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
3393 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
3394 if (vfio_pci_host_match(&host, &tmp->host)) {
3396 error_report("vfio: found another in-use device "
3397 "%s\n", vbasedev_iter->name);
3401 vfio_pci_pre_reset(tmp);
3402 tmp->vbasedev.needs_reset = false;
3409 if (!single && !multi) {
3410 error_report("vfio: No other in-use devices for multi hot reset\n");
3415 /* Determine how many group fds need to be passed */
3417 QLIST_FOREACH(group, &vfio_group_list, next) {
3418 for (i = 0; i < info->count; i++) {
3419 if (group->groupid == devices[i].group_id) {
3426 reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
3427 reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
3428 fds = &reset->group_fds[0];
3430 /* Fill in group fds */
3431 QLIST_FOREACH(group, &vfio_group_list, next) {
3432 for (i = 0; i < info->count; i++) {
3433 if (group->groupid == devices[i].group_id) {
3434 fds[reset->count++] = group->fd;
3441 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
3444 trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
3445 ret ? "%m" : "Success");
3448 /* Re-enable INTx on affected devices */
3449 for (i = 0; i < info->count; i++) {
3450 PCIHostDeviceAddress host;
3452 VFIODevice *vbasedev_iter;
3454 host.domain = devices[i].segment;
3455 host.bus = devices[i].bus;
3456 host.slot = PCI_SLOT(devices[i].devfn);
3457 host.function = PCI_FUNC(devices[i].devfn);
3459 if (vfio_pci_host_match(&host, &vdev->host)) {
3463 QLIST_FOREACH(group, &vfio_group_list, next) {
3464 if (group->groupid == devices[i].group_id) {
3473 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
3474 if (vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
3477 tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
3478 if (vfio_pci_host_match(&host, &tmp->host)) {
3479 vfio_pci_post_reset(tmp);
3485 vfio_pci_post_reset(vdev);
3492 * We want to differentiate hot reset of multiple in-use devices vs hot reset
3493 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
3494 * of doing hot resets when there is only a single device per bus. The in-use
3495 * here refers to how many VFIODevices are affected. A hot reset that affects
3496 * multiple devices, but only a single in-use device, means that we can call
3497 * it from our bus ->reset() callback since the extent is effectively a single
3498 * device. This allows us to make use of it in the hotplug path. When there
3499 * are multiple in-use devices, we can only trigger the hot reset during a
3500 * system reset and thus from our reset handler. We separate _one vs _multi
3501 * here so that we don't overlap and do a double reset on the system reset
3502 * path where both our reset handler and ->reset() callback are used. Calling
3503 * _one() will only do a hot reset for the single in-use device case; calling
3504 * _multi() will do nothing if a _one() would have been sufficient.
3506 static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
3508 return vfio_pci_hot_reset(vdev, true);
3511 static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
3513 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
3514 return vfio_pci_hot_reset(vdev, false);
3517 static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
3519 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
3520 if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
3521 vbasedev->needs_reset = true;
3525 static VFIODeviceOps vfio_pci_ops = {
3526 .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
3527 .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
3528 .vfio_eoi = vfio_eoi,
3529 .vfio_populate_device = vfio_populate_device,
3532 static void vfio_reset_handler(void *opaque)
3535 VFIODevice *vbasedev;
3537 QLIST_FOREACH(group, &vfio_group_list, next) {
3538 QLIST_FOREACH(vbasedev, &group->device_list, next) {
3539 vbasedev->ops->vfio_compute_needs_reset(vbasedev);
3543 QLIST_FOREACH(group, &vfio_group_list, next) {
3544 QLIST_FOREACH(vbasedev, &group->device_list, next) {
3545 if (vbasedev->needs_reset) {
3546 vbasedev->ops->vfio_hot_reset_multi(vbasedev);
3552 static void vfio_kvm_device_add_group(VFIOGroup *group)
3555 struct kvm_device_attr attr = {
3556 .group = KVM_DEV_VFIO_GROUP,
3557 .attr = KVM_DEV_VFIO_GROUP_ADD,
3558 .addr = (uint64_t)(unsigned long)&group->fd,
3561 if (!kvm_enabled()) {
3565 if (vfio_kvm_device_fd < 0) {
3566 struct kvm_create_device cd = {
3567 .type = KVM_DEV_TYPE_VFIO,
3570 if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
3571 error_report("KVM_CREATE_DEVICE: %m\n");
3575 vfio_kvm_device_fd = cd.fd;
3578 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
3579 error_report("Failed to add group %d to KVM VFIO device: %m",
3585 static void vfio_kvm_device_del_group(VFIOGroup *group)
3588 struct kvm_device_attr attr = {
3589 .group = KVM_DEV_VFIO_GROUP,
3590 .attr = KVM_DEV_VFIO_GROUP_DEL,
3591 .addr = (uint64_t)(unsigned long)&group->fd,
3594 if (vfio_kvm_device_fd < 0) {
3598 if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
3599 error_report("Failed to remove group %d from KVM VFIO device: %m",
3605 static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
3607 VFIOAddressSpace *space;
3609 QLIST_FOREACH(space, &vfio_address_spaces, list) {
3610 if (space->as == as) {
3615 /* No suitable VFIOAddressSpace, create a new one */
3616 space = g_malloc0(sizeof(*space));
3618 QLIST_INIT(&space->containers);
3620 QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
3625 static void vfio_put_address_space(VFIOAddressSpace *space)
3627 if (QLIST_EMPTY(&space->containers)) {
3628 QLIST_REMOVE(space, list);
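/*
 * Note (added): a container is an open /dev/vfio/vfio fd and may back several
 * groups in the same AddressSpace.  vfio_connect_container() first tries
 * VFIO_GROUP_SET_CONTAINER against each existing container in the space and
 * only creates and configures a new one (type1 or SPAPR TCE, whichever
 * extension the kernel reports) when that fails.
 */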
3633 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
3635 VFIOContainer *container;
3637 VFIOAddressSpace *space;
3639 space = vfio_get_address_space(as);
3641 QLIST_FOREACH(container, &space->containers, next) {
3642 if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
3643 group->container = container;
3644 QLIST_INSERT_HEAD(&container->group_list, group, container_next);
3649 fd = qemu_open("/dev/vfio/vfio", O_RDWR);
3651 error_report("vfio: failed to open /dev/vfio/vfio: %m");
3653 goto put_space_exit;
3656 ret = ioctl(fd, VFIO_GET_API_VERSION);
3657 if (ret != VFIO_API_VERSION) {
3658 error_report("vfio: supported vfio version: %d, "
3659 "reported version: %d", VFIO_API_VERSION, ret);
3664 container = g_malloc0(sizeof(*container));
3665 container->space = space;
3668 if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
3669 ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
3671 error_report("vfio: failed to set group container: %m");
3673 goto free_container_exit;
3676 ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
3678 error_report("vfio: failed to set iommu for container: %m");
3680 goto free_container_exit;
3683 container->iommu_data.type1.listener = vfio_memory_listener;
3684 container->iommu_data.release = vfio_listener_release;
3686 memory_listener_register(&container->iommu_data.type1.listener,
3687 container->space->as);
3689 if (container->iommu_data.type1.error) {
3690 ret = container->iommu_data.type1.error;
3691 error_report("vfio: memory listener initialization failed for container");
3692 goto listener_release_exit;
3695 container->iommu_data.type1.initialized = true;
3697 } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
3698 ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
3700 error_report("vfio: failed to set group container: %m");
3702 goto free_container_exit;
3705 ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
3707 error_report("vfio: failed to set iommu for container: %m");
3709 goto free_container_exit;
3713 * The host kernel code implementing VFIO_IOMMU_DISABLE is called
3714 * when container fd is closed so we do not call it explicitly
3717 ret = ioctl(fd, VFIO_IOMMU_ENABLE);
3719 error_report("vfio: failed to enable container: %m");
3721 goto free_container_exit;
3724 container->iommu_data.type1.listener = vfio_memory_listener;
3725 container->iommu_data.release = vfio_listener_release;
3727 memory_listener_register(&container->iommu_data.type1.listener,
3728 container->space->as);
3731 error_report("vfio: No available IOMMU models");
3733 goto free_container_exit;
3736 QLIST_INIT(&container->group_list);
3737 QLIST_INSERT_HEAD(&space->containers, container, next);
3739 group->container = container;
3740 QLIST_INSERT_HEAD(&container->group_list, group, container_next);
3744 listener_release_exit:
3745 vfio_listener_release(container);
3747 free_container_exit:
3754 vfio_put_address_space(space);
3759 static void vfio_disconnect_container(VFIOGroup *group)
3761 VFIOContainer *container = group->container;
3763 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
3764 error_report("vfio: error disconnecting group %d from container",
3768 QLIST_REMOVE(group, container_next);
3769 group->container = NULL;
3771 if (QLIST_EMPTY(&container->group_list)) {
3772 VFIOAddressSpace *space = container->space;
3774 if (container->iommu_data.release) {
3775 container->iommu_data.release(container);
3777 QLIST_REMOVE(container, next);
3778 trace_vfio_disconnect_container(container->fd);
3779 close(container->fd);
3782 vfio_put_address_space(space);
3786 static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
3790 struct vfio_group_status status = { .argsz = sizeof(status) };
3792 QLIST_FOREACH(group, &vfio_group_list, next) {
3793 if (group->groupid == groupid) {
3794 /* Found it. Now is it already in the right context? */
3795 if (group->container->space->as == as) {
3798 error_report("vfio: group %d used in multiple address spaces",
3805 group = g_malloc0(sizeof(*group));
3807 snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
3808 group->fd = qemu_open(path, O_RDWR);
3809 if (group->fd < 0) {
3810 error_report("vfio: error opening %s: %m", path);
3811 goto free_group_exit;
3814 if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
3815 error_report("vfio: error getting group status: %m");
3819 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
3820 error_report("vfio: error, group %d is not viable, please ensure "
3821 "all devices within the iommu_group are bound to their "
3822 "vfio bus driver.", groupid);
3826 group->groupid = groupid;
3827 QLIST_INIT(&group->device_list);
3829 if (vfio_connect_container(group, as)) {
3830 error_report("vfio: failed to setup container for group %d", groupid);
3834 if (QLIST_EMPTY(&vfio_group_list)) {
3835 qemu_register_reset(vfio_reset_handler, NULL);
3838 QLIST_INSERT_HEAD(&vfio_group_list, group, next);
3840 vfio_kvm_device_add_group(group);
3853 static void vfio_put_group(VFIOGroup *group)
3855 if (!QLIST_EMPTY(&group->device_list)) {
3859 vfio_kvm_device_del_group(group);
3860 vfio_disconnect_container(group);
3861 QLIST_REMOVE(group, next);
3862 trace_vfio_put_group(group->fd);
3866 if (QLIST_EMPTY(&vfio_group_list)) {
3867 qemu_unregister_reset(vfio_reset_handler, NULL);
3871 static int vfio_populate_device(VFIODevice *vbasedev)
3873 VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
3874 struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
3875 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
3878 /* Sanity check device */
3879 if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
3880 error_report("vfio: Um, this isn't a PCI device");
3884 if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
3885 error_report("vfio: unexpected number of io regions %u",
3886 vbasedev->num_regions);
3890 if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
3891 error_report("vfio: unexpected number of irqs %u", vbasedev->num_irqs);
3895 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
3898 ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
3900 error_report("vfio: Error getting region %d info: %m", i);
3904 trace_vfio_populate_device_region(vbasedev->name, i,
3905 (unsigned long)reg_info.size,
3906 (unsigned long)reg_info.offset,
3907 (unsigned long)reg_info.flags);
3909 vdev->bars[i].region.vbasedev = vbasedev;
3910 vdev->bars[i].region.flags = reg_info.flags;
3911 vdev->bars[i].region.size = reg_info.size;
3912 vdev->bars[i].region.fd_offset = reg_info.offset;
3913 vdev->bars[i].region.nr = i;
3914 QLIST_INIT(&vdev->bars[i].quirks);
3917 reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;
3919 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
3921 error_report("vfio: Error getting config info: %m");
3925 trace_vfio_populate_device_config(vdev->vbasedev.name,
3926 (unsigned long)reg_info.size,
3927 (unsigned long)reg_info.offset,
3928 (unsigned long)reg_info.flags);
3930 vdev->config_size = reg_info.size;
3931 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
3932 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
3934 vdev->config_offset = reg_info.offset;
3936 if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
3937 vbasedev->num_regions > VFIO_PCI_VGA_REGION_INDEX) {
3938 struct vfio_region_info vga_info = {
3939 .argsz = sizeof(vga_info),
3940 .index = VFIO_PCI_VGA_REGION_INDEX,
3943 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
3946 "vfio: Device does not support requested feature x-vga");
3950 if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
3951 !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
3952 vga_info.size < 0xbffff + 1) {
3953 error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
3954 (unsigned long)vga_info.flags,
3955 (unsigned long)vga_info.size);
3959 vdev->vga.fd_offset = vga_info.offset;
3960 vdev->vga.fd = vdev->vbasedev.fd;
3962 vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
3963 vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
3964 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);
3966 vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
3967 vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
3968 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);
3970 vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
3971 vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
3972 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);
3974 vdev->has_vga = true;
3976 irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
3978 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
3980 /* This can fail for an old kernel or legacy PCI dev */
3981 trace_vfio_populate_device_get_irq_info_failure();
3983 } else if (irq_info.count == 1) {
3984 vdev->pci_aer = true;
3986 error_report("vfio: %s "
3987 "Could not enable error recovery for the device",
3995 static int vfio_get_device(VFIOGroup *group, const char *name,
3996 VFIODevice *vbasedev)
3998 struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
4001 ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
4003 error_report("vfio: error getting device %s from group %d: %m",
4004 name, group->groupid);
4005 error_printf("Verify all devices in group %d are bound to vfio-<bus> "
4006 "or pci-stub and not already in use\n", group->groupid);
4011 vbasedev->group = group;
4012 QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);
4014 ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
4016 error_report("vfio: error getting device info: %m");
4020 vbasedev->num_irqs = dev_info.num_irqs;
4021 vbasedev->num_regions = dev_info.num_regions;
4022 vbasedev->flags = dev_info.flags;
4024 trace_vfio_get_device(name, dev_info.flags,
4025 dev_info.num_regions, dev_info.num_irqs);
4027 vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
4029 ret = vbasedev->ops->vfio_populate_device(vbasedev);
4033 vfio_put_base_device(vbasedev);
4038 void vfio_put_base_device(VFIODevice *vbasedev)
4040 QLIST_REMOVE(vbasedev, next);
4041 vbasedev->group = NULL;
4042 trace_vfio_put_base_device(vbasedev->fd);
4043 close(vbasedev->fd);
4046 static void vfio_put_device(VFIOPCIDevice *vdev)
4048 g_free(vdev->vbasedev.name);
4053 vfio_put_base_device(&vdev->vbasedev);
4056 static void vfio_err_notifier_handler(void *opaque)
4058 VFIOPCIDevice *vdev = opaque;
4060 if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
4065 * TBD. Retrieve the error details and decide what action
4066 * needs to be taken. One of the actions could be to pass
4067 * the error to the guest and have the guest driver recover
4068 * from the error. This requires that PCIe capabilities be
4069 * exposed to the guest. For now, we just terminate the
4070 * guest to contain the error.
4073 error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. "
4074 "Please collect any data possible and then kill the guest",
4075 __func__, vdev->host.domain, vdev->host.bus,
4076 vdev->host.slot, vdev->host.function);
4078 vm_stop(RUN_STATE_INTERNAL_ERROR);
4082 * Registers error notifier for devices supporting error recovery.
4083 * If we encounter a failure in this function, we report an error
4084 * and continue after disabling error recovery support for the
4087 static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
4091 struct vfio_irq_set *irq_set;
4094 if (!vdev->pci_aer) {
4098 if (event_notifier_init(&vdev->err_notifier, 0)) {
4099 error_report("vfio: Unable to init event notifier for error detection");
4100 vdev->pci_aer = false;
4104 argsz = sizeof(*irq_set) + sizeof(*pfd);
4106 irq_set = g_malloc0(argsz);
4107 irq_set->argsz = argsz;
4108 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
4109 VFIO_IRQ_SET_ACTION_TRIGGER;
4110 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
4113 pfd = (int32_t *)&irq_set->data;
4115 *pfd = event_notifier_get_fd(&vdev->err_notifier);
4116 qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);
4118 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
4120 error_report("vfio: Failed to set up error notification");
4121 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
4122 event_notifier_cleanup(&vdev->err_notifier);
4123 vdev->pci_aer = false;
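/*
 * Note (added): the vfio_irq_set layout used here -- argsz covering the
 * trailing data, DATA_EVENTFD | ACTION_TRIGGER, index selecting the error
 * IRQ, and a single int32_t eventfd in data[] -- is the generic way to attach
 * an eventfd to a device interrupt.  The teardown path below reuses the same
 * layout with an fd of -1 to de-assign the notifier.
 */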
4128 static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
4131 struct vfio_irq_set *irq_set;
4135 if (!vdev->pci_aer) {
4139 argsz = sizeof(*irq_set) + sizeof(*pfd);
4141 irq_set = g_malloc0(argsz);
4142 irq_set->argsz = argsz;
4143 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
4144 VFIO_IRQ_SET_ACTION_TRIGGER;
4145 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
4148 pfd = (int32_t *)&irq_set->data;
4151 ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
4153 error_report("vfio: Failed to de-assign error fd: %m");
4156 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
4158 event_notifier_cleanup(&vdev->err_notifier);
4161 static int vfio_initfn(PCIDevice *pdev)
4163 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
4164 VFIODevice *vbasedev_iter;
4166 char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
4172 /* Check that the host device exists */
4173 snprintf(path, sizeof(path),
4174 "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
4175 vdev->host.domain, vdev->host.bus, vdev->host.slot,
4176 vdev->host.function);
4177 if (stat(path, &st) < 0) {
4178 error_report("vfio: error: no such host device: %s", path);
4182 vdev->vbasedev.ops = &vfio_pci_ops;
4184 vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
4185 vdev->vbasedev.name = g_strdup_printf("%04x:%02x:%02x.%01x",
4186 vdev->host.domain, vdev->host.bus,
4187 vdev->host.slot, vdev->host.function);
4189 strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);
4191 len = readlink(path, iommu_group_path, sizeof(path));
4192 if (len <= 0 || len >= sizeof(path)) {
4193 error_report("vfio: error no iommu_group for device");
4194 return len < 0 ? -errno : -ENAMETOOLONG;
4197 iommu_group_path[len] = 0;
4198 group_name = basename(iommu_group_path);
4200 if (sscanf(group_name, "%d", &groupid) != 1) {
4201 error_report("vfio: error reading %s: %m", path);
4205 trace_vfio_initfn(vdev->vbasedev.name, groupid);
4207 group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev));
4209 error_report("vfio: failed to get group %d", groupid);
4213 snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
4214 vdev->host.domain, vdev->host.bus, vdev->host.slot,
4215 vdev->host.function);
4217 QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
4218 if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
4219 error_report("vfio: error: device %s is already attached", path);
4220 vfio_put_group(group);
4225 ret = vfio_get_device(group, path, &vdev->vbasedev);
4227 error_report("vfio: failed to get device %s", path);
4228 vfio_put_group(group);
4232 /* Get a copy of config space */
4233 ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
4234 MIN(pci_config_size(&vdev->pdev), vdev->config_size),
4235 vdev->config_offset);
4236 if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
4237 ret = ret < 0 ? -errno : -EFAULT;
4238 error_report("vfio: Failed to read device config space");
4242 /* vfio emulates a lot for us, but some bits need extra love */
4243 vdev->emulated_config_bits = g_malloc0(vdev->config_size);
4245 /* QEMU can choose to expose the ROM or not */
4246 memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
4248 /* QEMU can change multi-function devices to single function, or reverse */
4249 vdev->emulated_config_bits[PCI_HEADER_TYPE] =
4250 PCI_HEADER_TYPE_MULTI_FUNCTION;
4252 /* Restore or clear multifunction, this is always controlled by QEMU */
4253 if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
4254 vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
4256 vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
4260 * Clear host resource mapping info. If we choose not to register a
4261 * BAR, such as might be the case with the option ROM, we can get
4262 * confusing, unwritable, residual addresses from the host here.
4264 memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
4265 memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
4267 vfio_pci_size_rom(vdev);
4269 ret = vfio_early_setup_msix(vdev);
4274 vfio_map_bars(vdev);
4276 ret = vfio_add_capabilities(vdev);
4281 /* QEMU emulates all of MSI & MSIX */
4282 if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
4283 memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
4287 if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
4288 memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
4289 vdev->msi_cap_size);
4292 if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
4293 vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
4294 vfio_intx_mmap_enable, vdev);
4295 pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
4296 ret = vfio_enable_intx(vdev);
4302 vfio_register_err_notifier(vdev);
4307 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
4308 vfio_teardown_msi(vdev);
4309 vfio_unmap_bars(vdev);
4311 g_free(vdev->emulated_config_bits);
4312 vfio_put_device(vdev);
4313 vfio_put_group(group);
4317 static void vfio_exitfn(PCIDevice *pdev)
4319 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
4320 VFIOGroup *group = vdev->vbasedev.group;
4322 vfio_unregister_err_notifier(vdev);
4323 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
4324 vfio_disable_interrupts(vdev);
4325 if (vdev->intx.mmap_timer) {
4326 timer_free(vdev->intx.mmap_timer);
4328 vfio_teardown_msi(vdev);
4329 vfio_unmap_bars(vdev);
4330 g_free(vdev->emulated_config_bits);
4332 vfio_put_device(vdev);
4333 vfio_put_group(group);
4336 static void vfio_pci_reset(DeviceState *dev)
4338 PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
4339 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
4341 trace_vfio_pci_reset(vdev->vbasedev.name);
4343 vfio_pci_pre_reset(vdev);
4345 if (vdev->vbasedev.reset_works &&
4346 (vdev->has_flr || !vdev->has_pm_reset) &&
4347 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
4348 trace_vfio_pci_reset_flr(vdev->vbasedev.name);
4352 /* See if we can do our own bus reset */
4353 if (!vfio_pci_hot_reset_one(vdev)) {
4357 /* If nothing else works and the device supports PM reset, use it */
4358 if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
4359 !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
4360 trace_vfio_pci_reset_pm(vdev->vbasedev.name);
4365 vfio_pci_post_reset(vdev);
4368 static void vfio_instance_init(Object *obj)
4370 PCIDevice *pci_dev = PCI_DEVICE(obj);
4371 VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, PCI_DEVICE(obj));
4373 device_add_bootindex_property(obj, &vdev->bootindex,
4375 &pci_dev->qdev, NULL);
4378 static Property vfio_pci_dev_properties[] = {
4379 DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
4380 DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
4381 intx.mmap_timeout, 1100),
4382 DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
4383 VFIO_FEATURE_ENABLE_VGA_BIT, false),
4384 DEFINE_PROP_INT32("bootindex", VFIOPCIDevice, bootindex, -1),
4386 * TODO - support passed fds... is this necessary?
4387 * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
4388 * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name),
4390 DEFINE_PROP_END_OF_LIST(),
4393 static const VMStateDescription vfio_pci_vmstate = {
4398 static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
4400 DeviceClass *dc = DEVICE_CLASS(klass);
4401 PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
4403 dc->reset = vfio_pci_reset;
4404 dc->props = vfio_pci_dev_properties;
4405 dc->vmsd = &vfio_pci_vmstate;
4406 dc->desc = "VFIO-based PCI device assignment";
4407 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
4408 pdc->init = vfio_initfn;
4409 pdc->exit = vfio_exitfn;
4410 pdc->config_read = vfio_pci_read_config;
4411 pdc->config_write = vfio_pci_write_config;
4412 pdc->is_express = 1; /* We might be */
4415 static const TypeInfo vfio_pci_dev_info = {
4417 .parent = TYPE_PCI_DEVICE,
4418 .instance_size = sizeof(VFIOPCIDevice),
4419 .class_init = vfio_pci_dev_class_init,
4420 .instance_init = vfio_instance_init,
4423 static void register_vfio_pci_dev_type(void)
4425 type_register_static(&vfio_pci_dev_info);
4428 type_init(register_vfio_pci_dev_type)
4430 static int vfio_container_do_ioctl(AddressSpace *as, int32_t groupid,
4431 int req, void *param)
4434 VFIOContainer *container;
4437 group = vfio_get_group(groupid, as);
4439 error_report("vfio: group %d not registered", groupid);
4443 container = group->container;
4444 if (group->container) {
4445 ret = ioctl(container->fd, req, param);
4447 error_report("vfio: failed to ioctl container: ret=%d, %s",
4448 ret, strerror(errno));
4452 vfio_put_group(group);
4457 int vfio_container_ioctl(AddressSpace *as, int32_t groupid,
4458 int req, void *param)
4460 /* We allow only certain ioctls to the container */
4462 case VFIO_CHECK_EXTENSION:
4463 case VFIO_IOMMU_SPAPR_TCE_GET_INFO:
4466 /* Return an error on unknown requests */
4467 error_report("vfio: unsupported ioctl %X", req);
4471 return vfio_container_do_ioctl(as, groupid, req, param);
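/*
 * Illustrative caller sketch (added; not from this file): sPAPR machine code
 * can use the filtered pass-through above to query TCE window parameters,
 * with groupid obtained from the device's iommu_group, e.g.:
 *
 *   struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *
 *   if (vfio_container_ioctl(&address_space_memory, groupid,
 *                            VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info) == 0) {
 *       // info.dma32_window_start / info.dma32_window_size describe the
 *       // 32-bit DMA window programmed by the host kernel
 *   }
 */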