#include "cpu.h"
#include "hw/pci/pci.h"
+#include "hw/pci/pci_host.h"
#include "hw/i386/pc.h"
+#include "hw/irq.h"
+#include "hw/hw.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
-#include "hw/xen/xen_backend.h"
+#include "hw/xen/xen-legacy-backend.h"
+#include "hw/xen/xen-bus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include <xen/hvm/ioreq.h>
-#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>
//#define DEBUG_XEN_HVM
QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;
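+/*
+ * The guest physmap is kept in a global list so it can be consulted
+ * without a XenIOState pointer, e.g. by xen_hvm_modified_memory().
+ */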
+static QLIST_HEAD(, XenPhysmap) xen_physmap;
+
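+/*
+ * Bookkeeping for the PCI devices registered with Xen, keyed by SBDF so
+ * that PCI config space ioreqs can be routed to the right device model.
+ */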
+typedef struct XenPciDevice {
+ PCIDevice *pci_dev;
+ uint32_t sbdf;
+ QLIST_ENTRY(XenPciDevice) entry;
+} XenPciDevice;
+
typedef struct XenIOState {
ioservid_t ioservid;
shared_iopage_t *shared_page;
struct xs_handle *xenstore;
MemoryListener memory_listener;
MemoryListener io_listener;
+ QLIST_HEAD(, XenPciDevice) dev_list;
DeviceListener device_listener;
- QLIST_HEAD(, XenPhysmap) physmap;
hwaddr free_phys_offset;
const XenPhysmap *log_for_dirtybit;
+ /* Buffer used by xen_sync_dirty_bitmap */
+ unsigned long *dirty_bitmap;
Notifier exit;
Notifier suspend;
g_free(pfn_list);
}
-static XenPhysmap *get_physmapping(XenIOState *state,
- hwaddr start_addr, ram_addr_t size)
+static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
{
XenPhysmap *physmap = NULL;
start_addr &= TARGET_PAGE_MASK;
- QLIST_FOREACH(physmap, &state->physmap, list) {
+ QLIST_FOREACH(physmap, &xen_physmap, list) {
if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
return physmap;
}
return NULL;
}
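+/*
+ * Translate a RAM block offset back to the guest physical address at
+ * which it is currently mapped.
+ */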
-#ifdef XEN_COMPAT_PHYSMAP
-static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
- ram_addr_t size, void *opaque)
+static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size)
{
- hwaddr addr = start_addr & TARGET_PAGE_MASK;
- XenIOState *xen_io_state = opaque;
+ hwaddr addr = phys_offset & TARGET_PAGE_MASK;
XenPhysmap *physmap = NULL;
- QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
+ QLIST_FOREACH(physmap, &xen_physmap, list) {
if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
- return physmap->start_addr;
+ return physmap->start_addr + (phys_offset - physmap->phys_offset);
}
}
- return start_addr;
+ return phys_offset;
}
+#ifdef XEN_COMPAT_PHYSMAP
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
char path[80], value[17];
hwaddr phys_offset = memory_region_get_ram_addr(mr);
const char *mr_name;
- if (get_physmapping(state, start_addr, size)) {
+ if (get_physmapping(start_addr, size)) {
return 0;
}
if (size <= 0) {
physmap->name = mr_name;
physmap->phys_offset = phys_offset;
- QLIST_INSERT_HEAD(&state->physmap, physmap, list);
+ QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
if (runstate_check(RUN_STATE_INMIGRATE)) {
/* Now that we have a physmap entry we can replace a dummy mapping with
XenPhysmap *physmap = NULL;
hwaddr phys_offset = 0;
- physmap = get_physmapping(state, start_addr, size);
+ physmap = get_physmapping(start_addr, size);
if (physmap == NULL) {
return -1;
}
QLIST_REMOVE(physmap, list);
if (state->log_for_dirtybit == physmap) {
state->log_for_dirtybit = NULL;
+ g_free(state->dirty_bitmap);
+ state->dirty_bitmap = NULL;
}
g_free(physmap);
}
static void xen_device_realize(DeviceListener *listener,
- DeviceState *dev)
+ DeviceState *dev)
{
XenIOState *state = container_of(listener, XenIOState, device_listener);
if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
PCIDevice *pci_dev = PCI_DEVICE(dev);
+ XenPciDevice *xendev = g_new(XenPciDevice, 1);
+
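+ /*
+ * Record the device and its SBDF so that cpu_ioreq_config() can
+ * route PCI config space accesses to it later.
+ */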
+ xendev->pci_dev = pci_dev;
+ xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
+ pci_dev->devfn);
+ QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);
xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
}
}
static void xen_device_unrealize(DeviceListener *listener,
- DeviceState *dev)
+ DeviceState *dev)
{
XenIOState *state = container_of(listener, XenIOState, device_listener);
if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
PCIDevice *pci_dev = PCI_DEVICE(dev);
+ XenPciDevice *xendev, *next;
xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);
+
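+ /* Drop the bookkeeping entry added by xen_device_realize(). */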
+ QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
+ if (xendev->pci_dev == pci_dev) {
+ QLIST_REMOVE(xendev, entry);
+ g_free(xendev);
+ break;
+ }
+ }
}
}
{
hwaddr npages = size >> TARGET_PAGE_BITS;
const int width = sizeof(unsigned long) * 8;
- unsigned long bitmap[DIV_ROUND_UP(npages, width)];
+ size_t bitmap_size = DIV_ROUND_UP(npages, width);
int rc, i, j;
const XenPhysmap *physmap = NULL;
- physmap = get_physmapping(state, start_addr, size);
+ physmap = get_physmapping(start_addr, size);
if (physmap == NULL) {
/* not handled */
return;
if (state->log_for_dirtybit == NULL) {
state->log_for_dirtybit = physmap;
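+ /* Allocated once per tracked range and freed when tracking stops. */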
+ state->dirty_bitmap = g_new(unsigned long, bitmap_size);
} else if (state->log_for_dirtybit != physmap) {
/* Only one range can be tracked for the dirty bitmap. */
return;
}
rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
- npages, bitmap);
+ npages, state->dirty_bitmap);
if (rc < 0) {
#ifndef ENODATA
#define ENODATA ENOENT
return;
}
- for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
- unsigned long map = bitmap[i];
+ for (i = 0; i < bitmap_size; i++) {
+ unsigned long map = state->dirty_bitmap[i];
while (map != 0) {
j = ctzl(map);
map &= ~(1ul << j);
if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
state->log_for_dirtybit = NULL;
+ g_free(state->dirty_bitmap);
+ state->dirty_bitmap = NULL;
/* Disable dirty bit tracking */
xen_track_dirty_vram(xen_domid, 0, 0, NULL);
}
/* Return the pending ioreq packet, or NULL if none is outstanding. */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ unsigned int max_cpus = ms->smp.max_cpus;
int i;
evtchn_port_t port;
}
}
+static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
+{
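+ /*
+ * For IOREQ_TYPE_PCI_CONFIG requests, the upper 32 bits of req->addr
+ * carry the SBDF of the target device and the lower bits the config
+ * space offset.
+ */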
+ uint32_t sbdf = req->addr >> 32;
+ uint32_t reg = req->addr;
+ XenPciDevice *xendev;
+
+ if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
+ req->size != sizeof(uint32_t)) {
+ hw_error("PCI config access: bad size (%u)", req->size);
+ }
+
+ if (req->count != 1) {
+ hw_error("PCI config access: bad count (%u)", req->count);
+ }
+
+ QLIST_FOREACH(xendev, &state->dev_list, entry) {
+ if (xendev->sbdf != sbdf) {
+ continue;
+ }
+
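+ /*
+ * data_is_ptr indicates that req->data holds a guest address to
+ * copy the value through rather than the value itself.
+ */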
+ if (!req->data_is_ptr) {
+ if (req->dir == IOREQ_READ) {
+ req->data = pci_host_config_read_common(
+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
+ req->size);
+ trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
+ req->size, req->data);
+ } else if (req->dir == IOREQ_WRITE) {
+ trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
+ req->size, req->data);
+ pci_host_config_write_common(
+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
+ req->data, req->size);
+ }
+ } else {
+ uint32_t tmp;
+
+ if (req->dir == IOREQ_READ) {
+ tmp = pci_host_config_read_common(
+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
+ req->size);
+ trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
+ req->size, tmp);
+ write_phys_req_item(req->data, req, 0, &tmp);
+ } else if (req->dir == IOREQ_WRITE) {
+ read_phys_req_item(req->data, req, 0, &tmp);
+ trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
+ req->size, tmp);
+ pci_host_config_write_common(
+ xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
+ tmp, req->size);
+ }
+ }
+ }
+}
+
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
X86CPU *cpu;
case IOREQ_TYPE_INVALIDATE:
xen_invalidate_map_cache();
break;
- case IOREQ_TYPE_PCI_CONFIG: {
- uint32_t sbdf = req->addr >> 32;
- uint32_t val;
-
- /* Fake a write to port 0xCF8 so that
- * the config space access will target the
- * correct device model.
- */
- val = (1u << 31) |
- ((req->addr & 0x0f00) << 16) |
- ((sbdf & 0xffff) << 8) |
- (req->addr & 0xfc);
- do_outp(0xcf8, 4, val);
-
- /* Now issue the config space access via
- * port 0xCFC
- */
- req->addr = 0xcfc | (req->addr & 0x03);
- cpu_ioreq_pio(req);
+ case IOREQ_TYPE_PCI_CONFIG:
+ cpu_ioreq_config(state, req);
break;
- }
default:
hw_error("Invalid ioreq type 0x%x\n", req->type);
}
xen_domid, entries[i]);
physmap->name = xs_read(state->xenstore, 0, path, &len);
- QLIST_INSERT_HEAD(&state->physmap, physmap, list);
+ QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
}
free(entries);
}
static int xen_map_ioreq_server(XenIOState *state)
{
+ void *addr = NULL;
+ xenforeignmemory_resource_handle *fres;
xen_pfn_t ioreq_pfn;
xen_pfn_t bufioreq_pfn;
evtchn_port_t bufioreq_evtchn;
int rc;
+ /*
+ * Attempt to map using the resource API and fall back to normal
+ * foreign mapping if this is not supported.
+ */
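+ /*
+ * The mapped resource places the bufioreq page at frame 0 and the
+ * ioreq page at frame 1, matching the offsets applied below.
+ */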
+ QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
+ QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
+ fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
+ XENMEM_resource_ioreq_server,
+ state->ioservid, 0, 2,
+ &addr,
+ PROT_READ | PROT_WRITE, 0);
+ if (fres != NULL) {
+ trace_xen_map_resource_ioreq(state->ioservid, addr);
+ state->buffered_io_page = addr;
+ state->shared_page = addr + TARGET_PAGE_SIZE;
+ } else if (errno != EOPNOTSUPP) {
+ error_report("failed to map ioreq server resources: error %d handle=%p",
+ errno, xen_xc);
+ return -1;
+ }
+
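+ /* Only ask for the PFNs of pages that still need a legacy mapping. */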
rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
- &ioreq_pfn, &bufioreq_pfn,
+ (state->shared_page == NULL) ?
+ &ioreq_pfn : NULL,
+ (state->buffered_io_page == NULL) ?
+ &bufioreq_pfn : NULL,
&bufioreq_evtchn);
if (rc < 0) {
error_report("failed to get ioreq server info: error %d handle=%p",
return rc;
}
- DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
- DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
- DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
-
- state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
- PROT_READ | PROT_WRITE,
- 1, &ioreq_pfn, NULL);
if (state->shared_page == NULL) {
- error_report("map shared IO page returned error %d handle=%p",
- errno, xen_xc);
- return -1;
+ DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
+
+ state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
+ PROT_READ | PROT_WRITE,
+ 1, &ioreq_pfn, NULL);
+ if (state->shared_page == NULL) {
+ error_report("map shared IO page returned error %d handle=%p",
+ errno, xen_xc);
+ }
}
- state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
- PROT_READ | PROT_WRITE,
- 1, &bufioreq_pfn, NULL);
if (state->buffered_io_page == NULL) {
- error_report("map buffered IO page returned error %d", errno);
+ DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
+
+ state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
+ PROT_READ | PROT_WRITE,
+ 1, &bufioreq_pfn,
+ NULL);
+ if (state->buffered_io_page == NULL) {
+ error_report("map buffered IO page returned error %d", errno);
+ return -1;
+ }
+ }
+
+ if (state->shared_page == NULL || state->buffered_io_page == NULL) {
return -1;
}
+ DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);
+
state->bufioreq_remote_port = bufioreq_evtchn;
return 0;
void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
{
+ MachineState *ms = MACHINE(pcms);
+ unsigned int max_cpus = ms->smp.max_cpus;
int i, rc;
xen_pfn_t ioreq_pfn;
XenIOState *state;
state->wakeup.notify = xen_wakeup_notifier;
qemu_register_wakeup_notifier(&state->wakeup);
+ /*
+ * Register wake-up support in QMP query-current-machine API
+ */
+ qemu_register_wakeup_support();
+
rc = xen_map_ioreq_server(state);
if (rc < 0) {
goto err;
qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);
state->memory_listener = xen_memory_listener;
- QLIST_INIT(&state->physmap);
memory_listener_register(&state->memory_listener, &address_space_memory);
state->log_for_dirtybit = NULL;
memory_listener_register(&state->io_listener, &address_space_io);
state->device_listener = xen_device_listener;
+ QLIST_INIT(&state->dev_list);
device_listener_register(&state->device_listener);
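+ /*
+ * Initialize the new xen-bus backend framework alongside the legacy
+ * backend core set up below.
+ */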
+ xen_bus_init();
+
/* Initialize backend core & drivers */
if (xen_be_init() != 0) {
error_report("xen backend core setup failed");
goto err;
}
xen_be_register_common();
+
+ QLIST_INIT(&xen_physmap);
xen_read_physmap(state);
/* Disable ACPI build because Xen handles it */
int rc;
ram_addr_t start_pfn, nb_pages;
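+ /* 'start' arrives as a RAM block offset; convert it to a guest address. */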
+ start = xen_phys_offset_to_gaddr(start, length);
+
if (length == 0) {
length = TARGET_PAGE_SIZE;
}