#include "exec/address-spaces.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
+#include "hw/pci-host/gpex.h"
#define NUM_VIRTIO_TRANSPORTS 32
VIRT_MMIO,
VIRT_RTC,
VIRT_FW_CFG,
+ VIRT_PCIE,
};
typedef struct MemMapEntry {
[VIRT_FW_CFG] = { 0x09020000, 0x0000000a },
[VIRT_MMIO] = { 0x0a000000, 0x00000200 },
/* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
- /* 0x10000000 .. 0x40000000 reserved for PCI */
+ /*
+ * PCIE verbose map:
+ *
+ * MMIO window { 0x10000000, 0x2eff0000 },
+ * PIO window { 0x3eff0000, 0x00010000 },
+ * ECAM { 0x3f000000, 0x01000000 },
+ */
+ [VIRT_PCIE] = { 0x10000000, 0x30000000 },
[VIRT_MEM] = { 0x40000000, 30ULL * 1024 * 1024 * 1024 },
};
static const int a15irqmap[] = {
[VIRT_UART] = 1,
[VIRT_RTC] = 2,
+ [VIRT_PCIE] = 3, /* ...to 6 */
[VIRT_MMIO] = 16, /* ...to 16 + NUM_VIRTIO_TRANSPORTS - 1 */
};
}
}
-static void fdt_add_gic_node(const VirtBoardInfo *vbi)
+static uint32_t fdt_add_gic_node(const VirtBoardInfo *vbi)
{
uint32_t gic_phandle;
2, vbi->memmap[VIRT_GIC_CPU].base,
2, vbi->memmap[VIRT_GIC_CPU].size);
qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
+
+ return gic_phandle;
}
-static void create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
+static uint32_t create_gic(const VirtBoardInfo *vbi, qemu_irq *pic)
{
/* We create a standalone GIC v2 */
DeviceState *gicdev;
qdev_get_gpio_in(gicdev, ppibase + 27));
sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
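+ /* The GIC's sysbus IRQ outputs [0, smp_cpus) are the per-CPU IRQ
+ * lines; outputs [smp_cpus, 2 * smp_cpus) are the corresponding
+ * per-CPU FIQ lines, wired up below.
+ */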
+ sysbus_connect_irq(gicbusdev, i + smp_cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
}
for (i = 0; i < NUM_IRQS; i++) {
pic[i] = qdev_get_gpio_in(gicdev, i);
}
- fdt_add_gic_node(vbi);
+ return fdt_add_gic_node(vbi);
}
static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
int i;
hwaddr size = vbi->memmap[VIRT_MMIO].size;
- /* Note that we have to create the transports in forwards order
- * so that command line devices are inserted lowest address first,
- * and then add dtb nodes in reverse order so that they appear in
- * the finished device tree lowest address first.
+ /* We create the transports in forwards order. Since qbus_realize()
+ * prepends (not appends) new child buses, the incrementing loop below will
+ * create a list of virtio-mmio buses with decreasing base addresses.
+ *
+ * When a -device option is processed from the command line,
+ * qbus_find_recursive() picks the next free virtio-mmio bus in forwards
+ * order. The upshot is that -device options in increasing command line
+ * order are mapped to virtio-mmio buses with decreasing base addresses.
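+ *
+ * For example (hypothetical command line), given
+ * -device virtio-blk-device,drive=hd -device virtio-net-device
+ * the first option binds to the transport at the highest base address
+ * (0x0a000000 + 31 * 0x200 = 0x0a003e00) and the second to the next one
+ * down, at 0x0a003c00.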
+ *
+ * When this code was originally written, that arrangement ensured that the
+ * guest Linux kernel would give the lowest "name" (/dev/vda, eth0, etc.) to
+ * the first -device on the command line. (The end-to-end order is a
+ * function of this loop, qbus_realize(), qbus_find_recursive(), and the
+ * guest kernel's name-to-address assignment strategy.)
+ *
+ * Meanwhile, the kernel's traversal seems to have been reversed; see e.g.
+ * the message, if not necessarily the code, of commit 70161ff336.
+ * Therefore the loop now establishes the inverse of the original intent.
+ *
+ * Unfortunately, we can't counteract the kernel change by reversing the
+ * loop; it would break existing command lines.
+ *
+ * In any case, the kernel makes no guarantee about the stability of
+ * enumeration order of virtio devices (as demonstrated by it changing
+ * between kernel versions). For reliable and stable identification
+ * of disks users must use UUIDs or similar mechanisms.
*/
for (i = 0; i < NUM_VIRTIO_TRANSPORTS; i++) {
int irq = vbi->irqmap[VIRT_MMIO] + i;
sysbus_create_simple("virtio-mmio", base, pic[irq]);
}
+ /* We add dtb nodes in reverse order so that they appear in the finished
+ * device tree lowest address first.
+ *
+ * Note that this mapping is independent of the loop above. The previous
+ * loop influences virtio device to virtio transport assignment, whereas
+ * this loop controls how virtio transports are laid out in the dtb.
+ */
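+ /* (libfdt's fdt_add_subnode() inserts a new node as its parent's first
+ * child, so adding the nodes in reverse index order leaves the node with
+ * the lowest base address first in the finished tree.)
+ */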
for (i = NUM_VIRTIO_TRANSPORTS - 1; i >= 0; i--) {
char *nodename;
int irq = vbi->irqmap[VIRT_MMIO] + i;
DeviceState *dev = qdev_create(NULL, "cfi.pflash01");
const uint64_t sectorlength = 256 * 1024;
- if (dinfo && qdev_prop_set_drive(dev, "drive",
- blk_by_legacy_dinfo(dinfo))) {
- abort();
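+ /* qdev_prop_set_drive() now reports failure through an Error **;
+ * passing &error_abort preserves the previous abort-on-failure
+ * behaviour. */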
+ if (dinfo) {
+ qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo),
+ &error_abort);
}
qdev_prop_set_uint32(dev, "num-blocks", flashsize / sectorlength);
char *nodename;
if (bios_name) {
- const char *fn;
+ char *fn;
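+ /* qemu_find_file() returns a newly allocated path that the caller
+ * must free, hence char * rather than const char *. */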
+ int image_size;
if (drive_get(IF_PFLASH, 0, 0)) {
error_report("The contents of the first flash device may be "
exit(1);
}
fn = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
- if (!fn || load_image_targphys(fn, flashbase, flashsize) < 0) {
+ if (!fn) {
+ error_report("Could not find ROM image '%s'", bios_name);
+ exit(1);
+ }
+ image_size = load_image_targphys(fn, flashbase, flashsize);
+ g_free(fn);
+ if (image_size < 0) {
error_report("Could not load ROM image '%s'", bios_name);
exit(1);
}
g_free(nodename);
}
+static void create_pcie_irq_map(const VirtBoardInfo *vbi, uint32_t gic_phandle,
+ int first_irq, const char *nodename)
+{
+ int devfn, pin;
+ uint32_t full_irq_map[4 * 4 * 8] = { 0 };
+ uint32_t *irq_map = full_irq_map;
+
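+ /* Each interrupt-map entry is 8 cells: a 3-cell child unit address (the
+ * devfn in bits [15:8] of the first cell), a 1-cell child interrupt
+ * specifier (INTA..INTD encoded as 1..4), the interrupt parent phandle,
+ * and a 3-cell GIC interrupt specifier. full_irq_map covers 4 slots x
+ * 4 pins x 8 cells.
+ */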
+ for (devfn = 0; devfn <= 0x18; devfn += 0x8) {
+ for (pin = 0; pin < 4; pin++) {
+ int irq_type = GIC_FDT_IRQ_TYPE_SPI;
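+ /* Standard PCI INTx swizzling: rotate the pin by the slot number so
+ * that the four host IRQs are shared evenly across slots. */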
+ int irq_nr = first_irq + ((pin + PCI_SLOT(devfn)) % PCI_NUM_PINS);
+ int irq_level = GIC_FDT_IRQ_FLAGS_LEVEL_HI;
+ int i;
+
+ uint32_t map[] = {
+ devfn << 8, 0, 0, /* devfn */
+ pin + 1, /* PCI pin */
+ gic_phandle, irq_type, irq_nr, irq_level }; /* GIC irq */
+
+ /* Convert map to big endian */
+ for (i = 0; i < 8; i++) {
+ irq_map[i] = cpu_to_be32(map[i]);
+ }
+ irq_map += 8;
+ }
+ }
+
+ qemu_fdt_setprop(vbi->fdt, nodename, "interrupt-map",
+ full_irq_map, sizeof(full_irq_map));
+
+ qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
+ 0x1800, 0, 0, /* devfn: the slot bits (0x1800 == PCI_DEVFN(3, 0) << 8) */
+ 0x7 /* PCI irq */);
+}
+
+static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
+ uint32_t gic_phandle)
+{
+ hwaddr base = vbi->memmap[VIRT_PCIE].base;
+ hwaddr size = vbi->memmap[VIRT_PCIE].size;
+ hwaddr end = base + size;
+ hwaddr size_mmio;
+ hwaddr size_ioport = 64 * 1024;
+ int nr_pcie_buses = 16;
+ hwaddr size_ecam = PCIE_MMCFG_SIZE_MIN * nr_pcie_buses;
+ hwaddr base_mmio = base;
+ hwaddr base_ioport;
+ hwaddr base_ecam;
+ int irq = vbi->irqmap[VIRT_PCIE];
+ MemoryRegion *mmio_alias;
+ MemoryRegion *mmio_reg;
+ MemoryRegion *ecam_alias;
+ MemoryRegion *ecam_reg;
+ DeviceState *dev;
+ char *nodename;
+ int i;
+
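+ /* Carve the ECAM window (1 MiB of config space per bus) and the I/O port
+ * window out of the top of the PCIE region, each aligned down to its own
+ * size; the MMIO window gets everything below them. With the memmap above
+ * this produces exactly the MMIO/PIO/ECAM split shown in the verbose map
+ * comment.
+ */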
+ base_ecam = QEMU_ALIGN_DOWN(end - size_ecam, size_ecam);
+ base_ioport = QEMU_ALIGN_DOWN(base_ecam - size_ioport, size_ioport);
+ size_mmio = base_ioport - base;
+
+ dev = qdev_create(NULL, TYPE_GPEX_HOST);
+ qdev_init_nofail(dev);
+
+ /* Map only the first size_ecam bytes of ECAM space: the host bridge's
+ * ECAM region covers configuration space for more buses than we expose,
+ * and the alias keeps the rest of the memmap free.
+ */
+ ecam_alias = g_new0(MemoryRegion, 1);
+ ecam_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0);
+ memory_region_init_alias(ecam_alias, OBJECT(dev), "pcie-ecam",
+ ecam_reg, 0, size_ecam);
+ memory_region_add_subregion(get_system_memory(), base_ecam, ecam_alias);
+
+ /* Map the MMIO window into system address space so as to expose
+ * the section of PCI MMIO space which starts at the same base address
+ * (ie 1:1 mapping for that part of PCI MMIO space visible through
+ * the window).
+ */
+ mmio_alias = g_new0(MemoryRegion, 1);
+ mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1);
+ memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
+ mmio_reg, base_mmio, size_mmio);
+ memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
+
+ /* Map IO port space */
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 2, base_ioport);
+
+ for (i = 0; i < GPEX_NUM_IRQS; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]);
+ }
+
+ nodename = g_strdup_printf("/pcie@%" PRIx64, base);
+ qemu_fdt_add_subnode(vbi->fdt, nodename);
+ qemu_fdt_setprop_string(vbi->fdt, nodename,
+ "compatible", "pci-host-ecam-generic");
+ qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
+ qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0,
+ nr_pcie_buses - 1);
+
+ qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ 2, base_ecam, 2, size_ecam);
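+ /* Each "ranges" entry is a 1-cell space code, a 2-cell PCI address, a
+ * 2-cell CPU address and a 2-cell size; the MMIO window is mapped 1:1,
+ * so its PCI and CPU addresses coincide.
+ */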
+ qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
+ 1, FDT_PCI_RANGE_IOPORT, 2, 0,
+ 2, base_ioport, 2, size_ioport,
+ 1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
+ 2, base_mmio, 2, size_mmio);
+
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
+ create_pcie_irq_map(vbi, gic_phandle, irq, nodename);
+
+ g_free(nodename);
+}
+
static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
{
const VirtBoardInfo *board = (const VirtBoardInfo *)binfo;
MemoryRegion *ram = g_new(MemoryRegion, 1);
const char *cpu_model = machine->cpu_model;
VirtBoardInfo *vbi;
+ uint32_t gic_phandle;
+ char **cpustr;
if (!cpu_model) {
cpu_model = "cortex-a15";
}
- vbi = find_machine_info(cpu_model);
+ /* Separate the actual CPU model name from any appended features */
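+ /* (e.g. a hypothetical "-cpu cortex-a57,aarch64=off" yields cpustr[0] =
+ * "cortex-a57" and cpustr[1] = "aarch64=off") */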
+ cpustr = g_strsplit(cpu_model, ",", 2);
+
+ vbi = find_machine_info(cpustr[0]);
if (!vbi) {
- error_report("mach-virt: CPU %s not supported", cpu_model);
+ error_report("mach-virt: CPU %s not supported", cpustr[0]);
exit(1);
}
create_fdt(vbi);
for (n = 0; n < smp_cpus; n++) {
- ObjectClass *oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
+ ObjectClass *oc = cpu_class_by_name(TYPE_ARM_CPU, cpustr[0]);
+ CPUClass *cc = CPU_CLASS(oc);
Object *cpuobj;
+ Error *err = NULL;
+ char *cpuopts = g_strdup(cpustr[1]);
if (!oc) {
fprintf(stderr, "Unable to find CPU definition\n");
exit(1);
}
cpuobj = object_new(object_class_get_name(oc));
+ /* Handle any CPU options specified by the user */
+ cc->parse_features(CPU(cpuobj), cpuopts, &err);
+ g_free(cpuopts);
+ if (err) {
+ error_report_err(err);
+ exit(1);
+ }
+
if (!vms->secure) {
object_property_set_bool(cpuobj, false, "has_el3", NULL);
}
object_property_set_bool(cpuobj, true, "realized", NULL);
}
+ g_strfreev(cpustr);
fdt_add_timer_nodes(vbi);
fdt_add_cpu_nodes(vbi);
fdt_add_psci_node(vbi);
- memory_region_init_ram(ram, NULL, "mach-virt.ram", machine->ram_size,
- &error_abort);
- vmstate_register_ram_global(ram);
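+ /* memory_region_allocate_system_memory(), unlike plain
+ * memory_region_init_ram(), honors -mem-path and NUMA memory backends. */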
+ memory_region_allocate_system_memory(ram, NULL, "mach-virt.ram",
+ machine->ram_size);
memory_region_add_subregion(sysmem, vbi->memmap[VIRT_MEM].base, ram);
create_flash(vbi);
- create_gic(vbi, pic);
+ gic_phandle = create_gic(vbi, pic);
create_uart(vbi, pic);
create_rtc(vbi, pic);
+ create_pcie(vbi, pic, gic_phandle);
+
/* Create mmio transports, so the user can create virtio backends
* (which will be automatically plugged in to the transports). If
* no backend is created the transport will just sit harmlessly idle.