#include "exec-memory.h"
#include "kvm.h"
#include "memory.h"
-#include "msi.h"
-#include "msix.h"
-#include "pci.h"
+#include "pci/msi.h"
+#include "pci/msix.h"
+#include "pci/pci.h"
#include "qemu-common.h"
#include "qemu-error.h"
#include "qemu-queue.h"
ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
+#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
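+/*
+ * Hardware-mask INTx at the device via the VFIO_DEVICE_SET_IRQS ioctl.
+ * The KVM INTx acceleration paths below use this to quiesce the device
+ * while the eventfd routing is reconfigured.
+ */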
+static void vfio_mask_intx(VFIODevice *vdev)
+{
+ struct vfio_irq_set irq_set = {
+ .argsz = sizeof(irq_set),
+ .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
+ .index = VFIO_PCI_INTX_IRQ_INDEX,
+ .start = 0,
+ .count = 1,
+ };
+
+ ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
+}
+#endif
+
/*
* Disabling BAR mmaping can be slow, but toggling it around INTx can
* also be a huge overhead. We try to get the best of both worlds by
vfio_unmask_intx(vdev);
}
+static void vfio_enable_intx_kvm(VFIODevice *vdev)
+{
+#ifdef CONFIG_KVM
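+ /*
+ * With KVM_IRQFD_FLAG_RESAMPLE, KVM injects the level interrupt when
+ * the VFIO interrupt eventfd fires and signals the resample eventfd
+ * on guest EOI, so the physical device can be unmasked without
+ * bouncing through QEMU.
+ */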
+ struct kvm_irqfd irqfd = {
+ .fd = event_notifier_get_fd(&vdev->intx.interrupt),
+ .gsi = vdev->intx.route.irq,
+ .flags = KVM_IRQFD_FLAG_RESAMPLE,
+ };
+ struct vfio_irq_set *irq_set;
+ int ret, argsz;
+ int32_t *pfd;
+
+ if (!kvm_irqfds_enabled() ||
+ vdev->intx.route.mode != PCI_INTX_ENABLED ||
+ !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
+ return;
+ }
+
+ /* Get to a known interrupt state */
+ qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
+ vfio_mask_intx(vdev);
+ vdev->intx.pending = false;
+ qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
+
+ /* Get an eventfd for resample/unmask */
+ if (event_notifier_init(&vdev->intx.unmask, 0)) {
+ error_report("vfio: Error: event_notifier_init failed eoi\n");
+ goto fail;
+ }
+
+ /* KVM triggers it, VFIO listens for it */
+ irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
+
+ if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
+ error_report("vfio: Error: Failed to setup resample irqfd: %m\n");
+ goto fail_irqfd;
+ }
+
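+ /*
+ * Set up VFIO to perform the UNMASK action whenever the resample
+ * eventfd is signaled: a variable-sized vfio_irq_set carrying the
+ * eventfd as its one data element.
+ */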
+ argsz = sizeof(*irq_set) + sizeof(*pfd);
+
+ irq_set = g_malloc0(argsz);
+ irq_set->argsz = argsz;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
+ irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
+ irq_set->start = 0;
+ irq_set->count = 1;
+ pfd = (int32_t *)&irq_set->data;
+
+ *pfd = irqfd.resamplefd;
+
+ ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ g_free(irq_set);
+ if (ret) {
+ error_report("vfio: Error: Failed to setup INTx unmask fd: %m\n");
+ goto fail_vfio;
+ }
+
+ /* Let'em rip */
+ vfio_unmask_intx(vdev);
+
+ vdev->intx.kvm_accel = true;
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
+ __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function);
+
+ return;
+
+fail_vfio:
+ irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
+ kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
+fail_irqfd:
+ event_notifier_cleanup(&vdev->intx.unmask);
+fail:
+ qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
+ vfio_unmask_intx(vdev);
+#endif
+}
+
+static void vfio_disable_intx_kvm(VFIODevice *vdev)
+{
+#ifdef CONFIG_KVM
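+ /*
+ * KVM matches the deassign request against the eventfd/GSI pair that
+ * was registered, so reuse them here.
+ */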
+ struct kvm_irqfd irqfd = {
+ .fd = event_notifier_get_fd(&vdev->intx.interrupt),
+ .gsi = vdev->intx.route.irq,
+ .flags = KVM_IRQFD_FLAG_DEASSIGN,
+ };
+
+ if (!vdev->intx.kvm_accel) {
+ return;
+ }
+
+ /*
+ * Get to a known state, hardware masked, QEMU ready to accept new
+ * interrupts, QEMU IRQ de-asserted.
+ */
+ vfio_mask_intx(vdev);
+ vdev->intx.pending = false;
+ qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
+
+ /* Tell KVM to stop listening for an INTx irqfd */
+ if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
+ error_report("vfio: Error: Failed to disable INTx irqfd: %m\n");
+ }
+
+ /* Closing the eventfd is enough for VFIO to clean up the kernel side */
+ event_notifier_cleanup(&vdev->intx.unmask);
+
+ /* QEMU starts listening for interrupt events. */
+ qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
+
+ vdev->intx.kvm_accel = false;
+
+ /* If we've missed an event, let it re-fire through QEMU */
+ vfio_unmask_intx(vdev);
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
+ __func__, vdev->host.domain, vdev->host.bus,
+ vdev->host.slot, vdev->host.function);
+#endif
+}
+
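+/*
+ * INTx routing notifier: the PCI core calls this when the chipset may
+ * have re-routed the INTx pin, so the KVM irqfd can be rebound to the
+ * new GSI.
+ */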
+static void vfio_update_irq(PCIDevice *pdev)
+{
+ VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
+ PCIINTxRoute route;
+
+ if (vdev->interrupt != VFIO_INT_INTx) {
+ return;
+ }
+
+ route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
+
+ if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
+ return; /* Nothing changed */
+ }
+
+ DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
+ vdev->host.domain, vdev->host.bus, vdev->host.slot,
+ vdev->host.function, vdev->intx.route.irq, route.irq);
+
+ vfio_disable_intx_kvm(vdev);
+
+ vdev->intx.route = route;
+
+ if (route.mode != PCI_INTX_ENABLED) {
+ return;
+ }
+
+ vfio_enable_intx_kvm(vdev);
+
+ /* Re-enable the interrupt in case we missed an EOI */
+ vfio_eoi(vdev);
+}
+
static int vfio_enable_intx(VFIODevice *vdev)
{
uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
vfio_disable_interrupts(vdev);
vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
+
+#ifdef CONFIG_KVM
+ /*
+ * Only done conditionally to avoid generating error messages on
+ * platforms where we won't actually use the result anyway.
+ */
+ if (kvm_irqfds_enabled() &&
+ kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
+ vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
+ vdev->intx.pin);
+ }
+#endif
+
ret = event_notifier_init(&vdev->intx.interrupt, 0);
if (ret) {
error_report("vfio: Error: event_notifier_init failed\n");
return -errno;
}
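+ /* Attempt KVM acceleration; on failure INTx stays routed through QEMU */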
+ vfio_enable_intx_kvm(vdev);
+
vdev->interrupt = VFIO_INT_INTx;
DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
int fd;
qemu_del_timer(vdev->intx.mmap_timer);
+ vfio_disable_intx_kvm(vdev);
vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
vdev->intx.pending = false;
qemu_set_irq(vdev->pdev.irq[vdev->intx.pin], 0);
vector->use = false;
}
-/* TODO This should move to msi.c */
-static MSIMessage msi_get_msg(PCIDevice *pdev, unsigned int vector)
-{
- uint16_t flags = pci_get_word(pdev->config + pdev->msi_cap + PCI_MSI_FLAGS);
- bool msi64bit = flags & PCI_MSI_FLAGS_64BIT;
- MSIMessage msg;
-
- if (msi64bit) {
- msg.address = pci_get_quad(pdev->config +
- pdev->msi_cap + PCI_MSI_ADDRESS_LO);
- } else {
- msg.address = pci_get_long(pdev->config +
- pdev->msi_cap + PCI_MSI_ADDRESS_LO);
- }
-
- msg.data = pci_get_word(pdev->config + pdev->msi_cap +
- (msi64bit ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32));
- msg.data += vector;
-
- return msg;
-}
-
static void vfio_enable_msix(VFIODevice *vdev)
{
vfio_disable_interrupts(vdev);
error_report("vfio: Error: event_notifier_init failed\n");
}
- msg = msi_get_msg(&vdev->pdev, i);
+ msg = msi_get_message(&vdev->pdev, i);
/*
* Attempt to enable route through KVM irqchip,
vfio_disable_msi_common(vdev);
- DPRINTF("%s(%04x:%02x:%02x.%x, msi%s)\n", __func__,
- vdev->host.domain, vdev->host.bus, vdev->host.slot,
- vdev->host.function, msix ? "x" : "");
+ DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
+ vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_disable_msi(VFIODevice *vdev)
/*
* IO Port/MMIO - Beware of the endians, VFIO is always little endian
*/
-static void vfio_bar_write(void *opaque, target_phys_addr_t addr,
+static void vfio_bar_write(void *opaque, hwaddr addr,
uint64_t data, unsigned size)
{
VFIOBAR *bar = opaque;
}
if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
- error_report("%s(,0x%"TARGET_PRIxPHYS", 0x%"PRIx64", %d) failed: %m\n",
+ error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m\n",
__func__, addr, data, size);
}
- DPRINTF("%s(BAR%d+0x%"TARGET_PRIxPHYS", 0x%"PRIx64", %d)\n",
+ DPRINTF("%s(BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
__func__, bar->nr, addr, data, size);
/*
}
static uint64_t vfio_bar_read(void *opaque,
- target_phys_addr_t addr, unsigned size)
+ hwaddr addr, unsigned size)
{
VFIOBAR *bar = opaque;
union {
uint64_t data = 0;
if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
- error_report("%s(,0x%"TARGET_PRIxPHYS", %d) failed: %m\n",
+ error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m\n",
__func__, addr, size);
return (uint64_t)-1;
}
break;
}
- DPRINTF("%s(BAR%d+0x%"TARGET_PRIxPHYS", %d) = 0x%"PRIx64"\n",
+ DPRINTF("%s(BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
__func__, bar->nr, addr, size, data);
/* Same as write above */
* DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
*/
static int vfio_dma_unmap(VFIOContainer *container,
- target_phys_addr_t iova, ram_addr_t size)
+ hwaddr iova, ram_addr_t size)
{
struct vfio_iommu_type1_dma_unmap unmap = {
.argsz = sizeof(unmap),
return 0;
}
-static int vfio_dma_map(VFIOContainer *container, target_phys_addr_t iova,
+static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
ram_addr_t size, void *vaddr, bool readonly)
{
struct vfio_iommu_type1_dma_map map = {
{
VFIOContainer *container = container_of(listener, VFIOContainer,
iommu_data.listener);
- target_phys_addr_t iova, end;
+ hwaddr iova, end;
void *vaddr;
int ret;
if (vfio_listener_skipped_section(section)) {
- DPRINTF("vfio: SKIPPING region_add %"TARGET_PRIxPHYS" - %"PRIx64"\n",
+ DPRINTF("vfio: SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
section->offset_within_address_space,
section->offset_within_address_space + section->size - 1);
return;
section->offset_within_region +
(iova - section->offset_within_address_space);
- DPRINTF("vfio: region_add %"TARGET_PRIxPHYS" - %"TARGET_PRIxPHYS" [%p]\n",
+ DPRINTF("vfio: region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
iova, end - 1, vaddr);
ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
if (ret) {
- error_report("vfio_dma_map(%p, 0x%"TARGET_PRIxPHYS", "
- "0x%"TARGET_PRIxPHYS", %p) = %d (%m)\n",
+ error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx", %p) = %d (%m)\n",
container, iova, end - iova, vaddr, ret);
}
}
{
VFIOContainer *container = container_of(listener, VFIOContainer,
iommu_data.listener);
- target_phys_addr_t iova, end;
+ hwaddr iova, end;
int ret;
if (vfio_listener_skipped_section(section)) {
- DPRINTF("vfio: SKIPPING region_del %"TARGET_PRIxPHYS" - %"PRIx64"\n",
+ DPRINTF("vfio: SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
section->offset_within_address_space,
section->offset_within_address_space + section->size - 1);
return;
return;
}
- DPRINTF("vfio: region_del %"TARGET_PRIxPHYS" - %"TARGET_PRIxPHYS"\n",
+ DPRINTF("vfio: region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
iova, end - 1);
ret = vfio_dma_unmap(container, iova, end - iova);
if (ret) {
- error_report("vfio_dma_unmap(%p, 0x%"TARGET_PRIxPHYS", "
- "0x%"TARGET_PRIxPHYS") = %d (%m)\n",
+ error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ "0x%"HWADDR_PRIx") = %d (%m)\n",
container, iova, end - iova, ret);
}
}
container->iommu_data.listener = vfio_memory_listener;
container->iommu_data.release = vfio_listener_release;
- memory_listener_register(&container->iommu_data.listener,
- get_system_memory());
+ memory_listener_register(&container->iommu_data.listener,
+ &address_space_memory);
} else {
error_report("vfio: No available IOMMU models\n");
g_free(container);
if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
vdev->intx.mmap_timer = qemu_new_timer_ms(vm_clock,
vfio_intx_mmap_enable, vdev);
+ pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
ret = vfio_enable_intx(vdev);
if (ret) {
goto out_teardown;
DEFINE_PROP_END_OF_LIST(),
};
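+/*
+ * Assigned devices cannot be migrated; an unmigratable VMState makes
+ * migration attempts fail cleanly instead of corrupting device state.
+ */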
+static const VMStateDescription vfio_pci_vmstate = {
+ .name = "vfio-pci",
+ .unmigratable = 1,
+};
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
dc->reset = vfio_pci_reset;
dc->props = vfio_pci_dev_properties;
+ dc->vmsd = &vfio_pci_vmstate;
+ dc->desc = "VFIO-based PCI device assignment";
pdc->init = vfio_initfn;
pdc->exit = vfio_exitfn;
pdc->config_read = vfio_pci_read_config;