* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+#include "qemu/osdep.h"
#include "hw/hw.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"
+#include "hw/boards.h"
#include "hw/isa/isa.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
+#include "qemu/cutils.h"
+#include "qapi/error.h"
-#define FW_CFG_CTL_SIZE 2
#define FW_CFG_NAME "fw_cfg"
#define FW_CFG_PATH "/machine/" FW_CFG_NAME
#define FW_CFG_DMA_CTL_READ 0x02
#define FW_CFG_DMA_CTL_SKIP 0x04
#define FW_CFG_DMA_CTL_SELECT 0x08
+#define FW_CFG_DMA_CTL_WRITE 0x10
#define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */
typedef struct FWCfgEntry {
uint32_t len;
+ bool allow_write;
uint8_t *data;
void *callback_opaque;
FWCfgReadCallback read_callback;
SysBusDevice parent_obj;
/*< public >*/
- FWCfgEntry entries[2][FW_CFG_MAX_ENTRY];
+ uint16_t file_slots;
+ FWCfgEntry *entries[2];
+ int *entry_order;
FWCfgFiles *files;
uint16_t cur_entry;
uint32_t cur_offset;
Notifier machine_ready;
+ int fw_cfg_order_override;
+
bool dma_enabled;
dma_addr_t dma_addr;
AddressSpace *dma_as;
temp = qemu_opt_get(opts, "splash-time");
if (temp != NULL) {
p = (char *)temp;
- boot_splash_time = strtol(p, (char **)&p, 10);
+ boot_splash_time = strtol(p, &p, 10);
}
}
temp = qemu_opt_get(opts, "reboot-timeout");
if (temp != NULL) {
p = (char *)temp;
- reboot_timeout = strtol(p, (char **)&p, 10);
+ reboot_timeout = strtol(p, &p, 10);
}
}
/* validate the input */
/* nothing, write support removed in QEMU v2.4+ */
}
+static inline uint16_t fw_cfg_file_slots(const FWCfgState *s)
+{
+ return s->file_slots;
+}
+
+/* Note: this function returns an exclusive limit. */
+static inline uint32_t fw_cfg_max_entry(const FWCfgState *s)
+{
+ return FW_CFG_FILE_FIRST + fw_cfg_file_slots(s);
+}
+
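/* For illustration: per architecture channel, selectors 0x0000 ..
 * FW_CFG_FILE_FIRST - 1 are the well-known entries (signature, UUID, ...),
 * and FW_CFG_FILE_FIRST .. fw_cfg_max_entry(s) - 1 are the named file
 * slots.  Selecting any key at or above the exclusive limit leaves
 * FW_CFG_INVALID in effect, as fw_cfg_select() below enforces.
 */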
static int fw_cfg_select(FWCfgState *s, uint16_t key)
{
- int ret;
+ int arch, ret;
+ FWCfgEntry *e;
s->cur_offset = 0;
- if ((key & FW_CFG_ENTRY_MASK) >= FW_CFG_MAX_ENTRY) {
+ if ((key & FW_CFG_ENTRY_MASK) >= fw_cfg_max_entry(s)) {
s->cur_entry = FW_CFG_INVALID;
ret = 0;
} else {
s->cur_entry = key;
ret = 1;
- }
-
- trace_fw_cfg_select(s, key, ret);
- return ret;
-}
-
-static uint8_t fw_cfg_read(FWCfgState *s)
-{
- int arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL);
- FWCfgEntry *e = &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
- uint8_t ret;
-
- if (s->cur_entry == FW_CFG_INVALID || !e->data || s->cur_offset >= e->len)
- ret = 0;
- else {
+ /* entry successfully selected, now run callback if present */
+ arch = !!(key & FW_CFG_ARCH_LOCAL);
+ e = &s->entries[arch][key & FW_CFG_ENTRY_MASK];
if (e->read_callback) {
- e->read_callback(e->callback_opaque, s->cur_offset);
+ e->read_callback(e->callback_opaque);
}
- ret = e->data[s->cur_offset++];
}
- trace_fw_cfg_read(s, ret);
+ trace_fw_cfg_select(s, key, ret);
return ret;
}
-static uint64_t fw_cfg_data_mem_read(void *opaque, hwaddr addr,
- unsigned size)
+static uint64_t fw_cfg_data_read(void *opaque, hwaddr addr, unsigned size)
{
FWCfgState *s = opaque;
+ int arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL);
+ FWCfgEntry *e = (s->cur_entry == FW_CFG_INVALID) ? NULL :
+ &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
uint64_t value = 0;
- unsigned i;
- for (i = 0; i < size; ++i) {
- value = (value << 8) | fw_cfg_read(s);
+ assert(size > 0 && size <= sizeof(value));
+ if (s->cur_entry != FW_CFG_INVALID && e->data && s->cur_offset < e->len) {
+ /* The least significant 'size' bytes of the return value are
+ * expected to contain a string preserving portion of the item
+ * data, padded with zeros on the right in case we run out early.
+ * In technical terms, we're composing the host-endian representation
+ * of the big endian interpretation of the fw_cfg string.
+ */
+ do {
+ value = (value << 8) | e->data[s->cur_offset++];
+ } while (--size && s->cur_offset < e->len);
+ /* If size is still not zero, we *did* run out early, so continue
+ * left-shifting, to add the appropriate number of padding zeros
+ * on the right.
+ */
+ value <<= 8 * size;
}
+
+ trace_fw_cfg_read(s, value);
return value;
}
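/* Worked example (illustrative values): suppose the selected item holds the
 * four bytes 'Q' 'E' 'M' 'U', cur_offset is 2, and the guest issues a 4-byte
 * read.  The loop consumes 'M' (0x4d) and 'U' (0x55), runs out with size == 2
 * left, and the final shift pads on the right:
 *
 *     value = ((0x4d << 8) | 0x55) << 16 == 0x4d550000
 *
 * i.e. the big-endian byte sequence 4d 55 00 00, as described above.
 */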
FWCfgDmaAccess dma;
int arch;
FWCfgEntry *e;
- int read;
+ int read = 0, write = 0;
dma_addr_t dma_addr;
/* Reset the address before the next access */
}
arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL);
- e = &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
+ e = (s->cur_entry == FW_CFG_INVALID) ? NULL :
+ &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
if (dma.control & FW_CFG_DMA_CTL_READ) {
read = 1;
+ write = 0;
+ } else if (dma.control & FW_CFG_DMA_CTL_WRITE) {
+ read = 0;
+ write = 1;
} else if (dma.control & FW_CFG_DMA_CTL_SKIP) {
read = 0;
+ write = 0;
} else {
dma.length = 0;
}
dma.control |= FW_CFG_DMA_CTL_ERROR;
}
}
-
+ if (write) {
+ dma.control |= FW_CFG_DMA_CTL_ERROR;
+ }
} else {
if (dma.length <= (e->len - s->cur_offset)) {
len = dma.length;
len = (e->len - s->cur_offset);
}
- if (e->read_callback) {
- e->read_callback(e->callback_opaque, s->cur_offset);
- }
-
/* If the access is not a read access, it will be a skip access,
* tested before.
*/
dma.control |= FW_CFG_DMA_CTL_ERROR;
}
}
+ if (write) {
+ if (!e->allow_write ||
+ len != dma.length ||
+ dma_memory_read(s->dma_as, dma.address,
+ &e->data[s->cur_offset], len)) {
+ dma.control |= FW_CFG_DMA_CTL_ERROR;
+ }
+ }
s->cur_offset += len;
}
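/* For illustration (assuming the control word layout documented in
 * docs/specs/fw_cfg.txt, with the selector key in the upper 16 bits when
 * FW_CFG_DMA_CTL_SELECT is set), a guest could select the first file slot
 * and write 4 bytes from guest address 0x1000 with a single descriptor:
 *
 *     FWCfgDmaAccess req = {
 *         .control = cpu_to_be32((FW_CFG_FILE_FIRST << 16) |
 *                                FW_CFG_DMA_CTL_SELECT |
 *                                FW_CFG_DMA_CTL_WRITE),
 *         .length  = cpu_to_be32(4),
 *         .address = cpu_to_be64(0x1000),
 *     };
 *
 * The write only succeeds if the entry was registered with allow_write set
 * (read_only == false) and the request does not run past the end of the
 * item; otherwise FW_CFG_DMA_CTL_ERROR is reported back, as checked above.
 */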
return is_write && size == 2;
}
-static uint64_t fw_cfg_comb_read(void *opaque, hwaddr addr,
- unsigned size)
-{
- return fw_cfg_read(opaque);
-}
-
static void fw_cfg_comb_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
{
};
static const MemoryRegionOps fw_cfg_data_mem_ops = {
- .read = fw_cfg_data_mem_read,
+ .read = fw_cfg_data_read,
.write = fw_cfg_data_mem_write,
.endianness = DEVICE_BIG_ENDIAN,
.valid = {
};
static const MemoryRegionOps fw_cfg_comb_mem_ops = {
- .read = fw_cfg_comb_read,
+ .read = fw_cfg_data_read,
.write = fw_cfg_comb_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.valid.accepts = fw_cfg_comb_valid,
{
FWCfgState *s = FW_CFG(d);
- fw_cfg_select(s, 0);
+ /* we never register a read callback for FW_CFG_SIGNATURE */
+ fw_cfg_select(s, FW_CFG_SIGNATURE);
}
/* Save restore 32 bit int as uint16_t
return version_id == 1;
}
-static bool fw_cfg_dma_enabled(void *opaque)
+bool fw_cfg_dma_enabled(void *opaque)
{
FWCfgState *s = opaque;
static void fw_cfg_add_bytes_read_callback(FWCfgState *s, uint16_t key,
FWCfgReadCallback callback,
void *callback_opaque,
- void *data, size_t len)
+ void *data, size_t len,
+ bool read_only)
{
int arch = !!(key & FW_CFG_ARCH_LOCAL);
key &= FW_CFG_ENTRY_MASK;
- assert(key < FW_CFG_MAX_ENTRY && len < UINT32_MAX);
+ assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX);
assert(s->entries[arch][key].data == NULL); /* avoid key conflict */
s->entries[arch][key].data = data;
s->entries[arch][key].len = (uint32_t)len;
s->entries[arch][key].read_callback = callback;
s->entries[arch][key].callback_opaque = callback_opaque;
+ s->entries[arch][key].allow_write = !read_only;
}
static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key,
key &= FW_CFG_ENTRY_MASK;
- assert(key < FW_CFG_MAX_ENTRY && len < UINT32_MAX);
+ assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX);
/* return the old data to the function caller, avoid memory leak */
ptr = s->entries[arch][key].data;
s->entries[arch][key].data = data;
s->entries[arch][key].len = len;
s->entries[arch][key].callback_opaque = NULL;
+ s->entries[arch][key].allow_write = false;
return ptr;
}
void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len)
{
- fw_cfg_add_bytes_read_callback(s, key, NULL, NULL, data, len);
+ fw_cfg_add_bytes_read_callback(s, key, NULL, NULL, data, len, true);
}
void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value)
    fw_cfg_add_bytes(s, key, copy, strlen(value) + 1);
}
+void fw_cfg_set_order_override(FWCfgState *s, int order)
+{
+ assert(s->fw_cfg_order_override == 0);
+ s->fw_cfg_order_override = order;
+}
+
+void fw_cfg_reset_order_override(FWCfgState *s)
+{
+ assert(s->fw_cfg_order_override != 0);
+ s->fw_cfg_order_override = 0;
+}
+
+/*
+ * This is the legacy order list. For legacy systems, files are in
+ * the fw_cfg in the order defined below, by the "order" value. Note
+ * that some entries (VGA ROMs, NIC option ROMS, etc.) go into a
+ * specific area, but there may be more than one and they occur in the
+ * order that the user specifies them on the command line. Those are
+ * handled in a special manner, using the order override above.
+ *
+ * For non-legacy, the files are sorted by filename to avoid this kind
+ * of complexity in the future.
+ *
+ * This is only for x86, other arches don't implement versioning so
+ * they won't set legacy mode.
+ */
+static struct {
+ const char *name;
+ int order;
+} fw_cfg_order[] = {
+ { "etc/boot-menu-wait", 10 },
+ { "bootsplash.jpg", 11 },
+ { "bootsplash.bmp", 12 },
+ { "etc/boot-fail-wait", 15 },
+ { "etc/smbios/smbios-tables", 20 },
+ { "etc/smbios/smbios-anchor", 30 },
+ { "etc/e820", 40 },
+ { "etc/reserved-memory-end", 50 },
+ { "genroms/kvmvapic.bin", 55 },
+ { "genroms/linuxboot.bin", 60 },
+ { }, /* VGA ROMs from pc_vga_init come here, 70. */
+ { }, /* NIC option ROMs from pc_nic_init come here, 80. */
+ { "etc/system-states", 90 },
+ { }, /* User ROMs come here, 100. */
+ { }, /* Device FW comes here, 110. */
+ { "etc/extra-pci-roots", 120 },
+ { "etc/acpi/tables", 130 },
+ { "etc/table-loader", 140 },
+ { "etc/tpm/log", 150 },
+ { "etc/acpi/rsdp", 160 },
+ { "bootorder", 170 },
+
+#define FW_CFG_ORDER_OVERRIDE_LAST 200
+};
+
+static int get_fw_cfg_order(FWCfgState *s, const char *name)
+{
+ int i;
+
+ if (s->fw_cfg_order_override > 0) {
+ return s->fw_cfg_order_override;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fw_cfg_order); i++) {
+ if (fw_cfg_order[i].name == NULL) {
+ continue;
+ }
+
+ if (strcmp(name, fw_cfg_order[i].name) == 0) {
+ return fw_cfg_order[i].order;
+ }
+ }
+
+ /* Stick unknown stuff at the end. */
+ error_report("warning: Unknown firmware file in legacy mode: %s", name);
+ return FW_CFG_ORDER_OVERRIDE_LAST;
+}
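/* For illustration: with legacy ordering, "etc/e820" (order 40) always lands
 * in front of "bootorder" (order 170) regardless of which is added first,
 * while files added between a fw_cfg_set_order_override() /
 * fw_cfg_reset_order_override() pair all share the override value and thus
 * keep the relative order in which the user specified them.
 */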
+
void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
FWCfgReadCallback callback, void *callback_opaque,
- void *data, size_t len)
+ void *data, size_t len, bool read_only)
{
- int i, index;
+ int i, index, count;
size_t dsize;
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+ int order = 0;
if (!s->files) {
- dsize = sizeof(uint32_t) + sizeof(FWCfgFile) * FW_CFG_FILE_SLOTS;
+ dsize = sizeof(uint32_t) + sizeof(FWCfgFile) * fw_cfg_file_slots(s);
s->files = g_malloc0(dsize);
fw_cfg_add_bytes(s, FW_CFG_FILE_DIR, s->files, dsize);
}
- index = be32_to_cpu(s->files->count);
- assert(index < FW_CFG_FILE_SLOTS);
+ count = be32_to_cpu(s->files->count);
+ assert(count < fw_cfg_file_slots(s));
+
+ /* Find the insertion point. */
+ if (mc->legacy_fw_cfg_order) {
+ /*
+ * Sort by order. For files with the same order, we keep them
+ * in the sequence in which they were added.
+ */
+ order = get_fw_cfg_order(s, filename);
+ for (index = count;
+ index > 0 && order < s->entry_order[index - 1];
+ index--);
+ } else {
+ /* Sort by file name. */
+ for (index = count;
+ index > 0 && strcmp(filename, s->files->f[index - 1].name) < 0;
+ index--);
+ }
- pstrcpy(s->files->f[index].name, sizeof(s->files->f[index].name),
- filename);
- for (i = 0; i < index; i++) {
- if (strcmp(s->files->f[index].name, s->files->f[i].name) == 0) {
+ /*
+ * Move all the entries from the index point and after down one
+ * to create a slot for the new entry. Because calculations are
+ * being done with the index, make it so that "i" is the current
+ * index and "i - 1" is the one being copied from, thus the
+ * unusual start and end in the for statement.
+ */
+    for (i = count; i > index; i--) {
+ s->files->f[i] = s->files->f[i - 1];
+ s->files->f[i].select = cpu_to_be16(FW_CFG_FILE_FIRST + i);
+ s->entries[0][FW_CFG_FILE_FIRST + i] =
+ s->entries[0][FW_CFG_FILE_FIRST + i - 1];
+ s->entry_order[i] = s->entry_order[i - 1];
+ }
+
+ memset(&s->files->f[index], 0, sizeof(FWCfgFile));
+ memset(&s->entries[0][FW_CFG_FILE_FIRST + index], 0, sizeof(FWCfgEntry));
+
+    pstrcpy(s->files->f[index].name, sizeof(s->files->f[index].name),
+            filename);
+ for (i = 0; i <= count; i++) {
+ if (i != index &&
+ strcmp(s->files->f[index].name, s->files->f[i].name) == 0) {
error_report("duplicate fw_cfg file name: %s",
s->files->f[index].name);
exit(1);
}
fw_cfg_add_bytes_read_callback(s, FW_CFG_FILE_FIRST + index,
- callback, callback_opaque, data, len);
+ callback, callback_opaque, data, len,
+ read_only);
s->files->f[index].size = cpu_to_be32(len);
s->files->f[index].select = cpu_to_be16(FW_CFG_FILE_FIRST + index);
+ s->entry_order[index] = order;
trace_fw_cfg_add_file(s, index, s->files->f[index].name, len);
- s->files->count = cpu_to_be32(index+1);
+ s->files->count = cpu_to_be32(count+1);
}
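/* Usage sketch (the file name here is made up): exposing a guest-writable
 * blob now only requires passing read_only == false:
 *
 *     void *blob = g_malloc0(4096);
 *     fw_cfg_add_file_callback(s, "etc/example-blob", NULL, NULL,
 *                              blob, 4096, false);
 *
 * Such an entry can then be updated through the DMA write path above, while
 * plain fw_cfg_add_file() below keeps the historical read-only behaviour.
 */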
void fw_cfg_add_file(FWCfgState *s, const char *filename,
void *data, size_t len)
{
- fw_cfg_add_file_callback(s, filename, NULL, NULL, data, len);
+ fw_cfg_add_file_callback(s, filename, NULL, NULL, data, len, true);
}
void *fw_cfg_modify_file(FWCfgState *s, const char *filename,
assert(s->files);
index = be32_to_cpu(s->files->count);
- assert(index < FW_CFG_FILE_SLOTS);
+ assert(index < fw_cfg_file_slots(s));
for (i = 0; i < index; i++) {
if (strcmp(filename, s->files->f[i].name) == 0) {
}
}
/* add new one */
- fw_cfg_add_file_callback(s, filename, NULL, NULL, data, len);
+ fw_cfg_add_file_callback(s, filename, NULL, NULL, data, len, true);
return NULL;
}
static void fw_cfg_init1(DeviceState *dev)
{
FWCfgState *s = FW_CFG(dev);
+ MachineState *machine = MACHINE(qdev_get_machine());
assert(!object_resolve_path(FW_CFG_PATH, NULL));
- object_property_add_child(qdev_get_machine(), FW_CFG_NAME, OBJECT(s), NULL);
+ object_property_add_child(OBJECT(machine), FW_CFG_NAME, OBJECT(s), NULL);
qdev_init_nofail(dev);
fw_cfg_add_bytes(s, FW_CFG_SIGNATURE, (char *)"QEMU", 4);
- fw_cfg_add_bytes(s, FW_CFG_UUID, qemu_uuid, 16);
- fw_cfg_add_i16(s, FW_CFG_NOGRAPHIC, (uint16_t)(display_type == DT_NOGRAPHIC));
- fw_cfg_add_i16(s, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
+ fw_cfg_add_bytes(s, FW_CFG_UUID, &qemu_uuid, 16);
+ fw_cfg_add_i16(s, FW_CFG_NOGRAPHIC, (uint16_t)!machine->enable_graphics);
fw_cfg_add_i16(s, FW_CFG_BOOT_MENU, (uint16_t)boot_menu);
fw_cfg_bootsplash(s);
fw_cfg_reboot(s);
DeviceState *dev;
FWCfgState *s;
uint32_t version = FW_CFG_VERSION;
- bool dma_enabled = dma_iobase && dma_as;
+ bool dma_requested = dma_iobase && dma_as;
dev = qdev_create(NULL, TYPE_FW_CFG_IO);
qdev_prop_set_uint32(dev, "iobase", iobase);
qdev_prop_set_uint32(dev, "dma_iobase", dma_iobase);
- qdev_prop_set_bit(dev, "dma_enabled", dma_enabled);
+ if (!dma_requested) {
+ qdev_prop_set_bit(dev, "dma_enabled", false);
+ }
fw_cfg_init1(dev);
s = FW_CFG(dev);
- if (dma_enabled) {
+ if (s->dma_enabled) {
/* 64 bits for the address field */
s->dma_as = dma_as;
s->dma_addr = 0;
SysBusDevice *sbd;
FWCfgState *s;
uint32_t version = FW_CFG_VERSION;
- bool dma_enabled = dma_addr && dma_as;
+ bool dma_requested = dma_addr && dma_as;
dev = qdev_create(NULL, TYPE_FW_CFG_MEM);
qdev_prop_set_uint32(dev, "data_width", data_width);
- qdev_prop_set_bit(dev, "dma_enabled", dma_enabled);
+ if (!dma_requested) {
+ qdev_prop_set_bit(dev, "dma_enabled", false);
+ }
fw_cfg_init1(dev);
s = FW_CFG(dev);
- if (dma_enabled) {
+ if (s->dma_enabled) {
s->dma_as = dma_as;
s->dma_addr = 0;
sysbus_mmio_map(sbd, 2, dma_addr);
static const TypeInfo fw_cfg_info = {
.name = TYPE_FW_CFG,
.parent = TYPE_SYS_BUS_DEVICE,
+ .abstract = true,
.instance_size = sizeof(FWCfgState),
.class_init = fw_cfg_class_init,
};
+static void fw_cfg_file_slots_allocate(FWCfgState *s, Error **errp)
+{
+ uint16_t file_slots_max;
+
+ if (fw_cfg_file_slots(s) < FW_CFG_FILE_SLOTS_MIN) {
+ error_setg(errp, "\"file_slots\" must be at least 0x%x",
+ FW_CFG_FILE_SLOTS_MIN);
+ return;
+ }
+
+ /* (UINT16_MAX & FW_CFG_ENTRY_MASK) is the highest inclusive selector value
+ * that we permit. The actual (exclusive) value coming from the
+ * configuration is (FW_CFG_FILE_FIRST + fw_cfg_file_slots(s)). */
+ file_slots_max = (UINT16_MAX & FW_CFG_ENTRY_MASK) - FW_CFG_FILE_FIRST + 1;
+ if (fw_cfg_file_slots(s) > file_slots_max) {
+ error_setg(errp, "\"file_slots\" must not exceed 0x%" PRIx16,
+ file_slots_max);
+ return;
+ }
+
+ s->entries[0] = g_new0(FWCfgEntry, fw_cfg_max_entry(s));
+ s->entries[1] = g_new0(FWCfgEntry, fw_cfg_max_entry(s));
+ s->entry_order = g_new0(int, fw_cfg_max_entry(s));
+}
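/* Worked example: with the usual key layout (FW_CFG_ENTRY_MASK == 0x3fff,
 * FW_CFG_FILE_FIRST == 0x20), the bound computed above is
 *
 *     file_slots_max = 0x3fff - 0x20 + 1 = 0x3fe0
 *
 * so a "file_slots" property anywhere in [FW_CFG_FILE_SLOTS_MIN, 0x3fe0]
 * is accepted and sized into the two entry arrays and the order array.
 */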
static Property fw_cfg_io_properties[] = {
DEFINE_PROP_UINT32("iobase", FWCfgIoState, iobase, -1),
DEFINE_PROP_UINT32("dma_iobase", FWCfgIoState, dma_iobase, -1),
DEFINE_PROP_BOOL("dma_enabled", FWCfgIoState, parent_obj.dma_enabled,
- false),
+ true),
+ DEFINE_PROP_UINT16("x-file-slots", FWCfgIoState, parent_obj.file_slots,
+ FW_CFG_FILE_SLOTS_MIN),
DEFINE_PROP_END_OF_LIST(),
};
{
FWCfgIoState *s = FW_CFG_IO(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ Error *local_err = NULL;
+
+ fw_cfg_file_slots_allocate(FW_CFG(s), &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ /* when using port i/o, the 8-bit data register ALWAYS overlaps
+ * with half of the 16-bit control register. Hence, the total size
+ * of the i/o region used is FW_CFG_CTL_SIZE */
memory_region_init_io(&s->comb_iomem, OBJECT(s), &fw_cfg_comb_mem_ops,
FW_CFG(s), "fwcfg", FW_CFG_CTL_SIZE);
sysbus_add_io(sbd, s->iobase, &s->comb_iomem);
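/* On x86, for example, iobase is traditionally 0x510: a 16-bit write to
 * port 0x510 hits the selector, while a byte access to 0x511 (offset 1 in
 * the same FW_CFG_CTL_SIZE region) hits the data register, which is why a
 * single combined "fwcfg" region of FW_CFG_CTL_SIZE bytes is registered.
 */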
static Property fw_cfg_mem_properties[] = {
DEFINE_PROP_UINT32("data_width", FWCfgMemState, data_width, -1),
DEFINE_PROP_BOOL("dma_enabled", FWCfgMemState, parent_obj.dma_enabled,
- false),
+ true),
+ DEFINE_PROP_UINT16("x-file-slots", FWCfgMemState, parent_obj.file_slots,
+ FW_CFG_FILE_SLOTS_MIN),
DEFINE_PROP_END_OF_LIST(),
};
FWCfgMemState *s = FW_CFG_MEM(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
const MemoryRegionOps *data_ops = &fw_cfg_data_mem_ops;
+ Error *local_err = NULL;
+
+ fw_cfg_file_slots_allocate(FW_CFG(s), &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops,
FW_CFG(s), "fwcfg.ctl", FW_CFG_CTL_SIZE);