#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/units.h"
+#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu-version.h"
#include "qemu/cutils.h"
#include "qemu/help_option.h"
#include "qemu/uuid.h"
+#include "sysemu/reset.h"
+#include "sysemu/runstate.h"
#include "sysemu/seccomp.h"
#include "sysemu/tcg.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
-#include "hw/hw.h"
-#include "hw/boards.h"
#include "sysemu/accel.h"
#include "hw/usb.h"
#include "hw/isa/isa.h"
#include "hw/firmware/smbios.h"
#include "hw/acpi/acpi.h"
#include "hw/xen/xen.h"
-#include "hw/qdev.h"
#include "hw/loader.h"
#include "monitor/qdev.h"
#include "sysemu/bt.h"
#include "qapi/qapi-visit-block-core.h"
#include "qapi/qapi-visit-ui.h"
#include "qapi/qapi-commands-block-core.h"
-#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-commands-run-state.h"
#include "qapi/qapi-commands-ui.h"
#include "qapi/qmp/qerror.h"
Chardev *parallel_hds[MAX_PARALLEL_PORTS];
int win2k_install_hack = 0;
int singlestep = 0;
-int smp_cpus;
-unsigned int max_cpus;
-int smp_cores = 1;
-int smp_threads = 1;
int acpi_enabled = 1;
int no_hpet = 0;
int fd_bootchk = 1;
}, {
.name = "sockets",
.type = QEMU_OPT_NUMBER,
+ }, {
+ .name = "dies",
+ .type = QEMU_OPT_NUMBER,
}, {
.name = "cores",
.type = QEMU_OPT_NUMBER,
},
};
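The new "dies" entry adds a topology level between sockets and cores; on a machine type that models it, the full hierarchy would be spelled out as, for example, -smp 16,sockets=2,dies=2,cores=2,threads=2 (an illustrative invocation, not taken from this patch).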
-static void smp_parse(QemuOpts *opts)
-{
- if (opts) {
- unsigned cpus = qemu_opt_get_number(opts, "cpus", 0);
- unsigned sockets = qemu_opt_get_number(opts, "sockets", 0);
- unsigned cores = qemu_opt_get_number(opts, "cores", 0);
- unsigned threads = qemu_opt_get_number(opts, "threads", 0);
-
- /* compute missing values, prefer sockets over cores over threads */
- if (cpus == 0 || sockets == 0) {
- cores = cores > 0 ? cores : 1;
- threads = threads > 0 ? threads : 1;
- if (cpus == 0) {
- sockets = sockets > 0 ? sockets : 1;
- cpus = cores * threads * sockets;
- } else {
- max_cpus = qemu_opt_get_number(opts, "maxcpus", cpus);
- sockets = max_cpus / (cores * threads);
- }
- } else if (cores == 0) {
- threads = threads > 0 ? threads : 1;
- cores = cpus / (sockets * threads);
- cores = cores > 0 ? cores : 1;
- } else if (threads == 0) {
- threads = cpus / (cores * sockets);
- threads = threads > 0 ? threads : 1;
- } else if (sockets * cores * threads < cpus) {
- error_report("cpu topology: "
- "sockets (%u) * cores (%u) * threads (%u) < "
- "smp_cpus (%u)",
- sockets, cores, threads, cpus);
- exit(1);
- }
-
- max_cpus = qemu_opt_get_number(opts, "maxcpus", cpus);
-
- if (max_cpus < cpus) {
- error_report("maxcpus must be equal to or greater than smp");
- exit(1);
- }
-
- if (sockets * cores * threads > max_cpus) {
- error_report("cpu topology: "
- "sockets (%u) * cores (%u) * threads (%u) > "
- "maxcpus (%u)",
- sockets, cores, threads, max_cpus);
- exit(1);
- }
-
- if (sockets * cores * threads != max_cpus) {
- warn_report("Invalid CPU topology deprecated: "
- "sockets (%u) * cores (%u) * threads (%u) "
- "!= maxcpus (%u)",
- sockets, cores, threads, max_cpus);
- }
-
- smp_cpus = cpus;
- smp_cores = cores;
- smp_threads = threads;
- }
-
- if (smp_cpus > 1) {
- Error *blocker = NULL;
- error_setg(&blocker, QERR_REPLAY_NOT_SUPPORTED, "smp");
- replay_add_blocker(blocker);
- }
-}
-
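For reference, the defaulting rules removed here (they now live behind the machine class's smp_parse hook) prefer sockets over cores over threads when filling in unspecified values. The standalone sketch below mirrors that resolution with plain integers instead of QemuOpts; resolve_topology() and the sample inputs are made up for illustration, and maxcpus handling is simplified to its default of cpus:

#include <stdio.h>

/* Hypothetical helper mirroring the defaulting rules above; not QEMU code. */
static void resolve_topology(unsigned cpus, unsigned sockets,
                             unsigned cores, unsigned threads)
{
    /* Compute missing values, preferring sockets over cores over threads. */
    if (cpus == 0 || sockets == 0) {
        cores = cores > 0 ? cores : 1;
        threads = threads > 0 ? threads : 1;
        if (cpus == 0) {
            sockets = sockets > 0 ? sockets : 1;
            cpus = cores * threads * sockets;
        } else {
            /* maxcpus taken as cpus here; the real code reads the option */
            sockets = cpus / (cores * threads);
        }
    } else if (cores == 0) {
        threads = threads > 0 ? threads : 1;
        cores = cpus / (sockets * threads);
        cores = cores > 0 ? cores : 1;
    } else if (threads == 0) {
        threads = cpus / (cores * sockets);
        threads = threads > 0 ? threads : 1;
    }

    printf("cpus=%u sockets=%u cores=%u threads=%u\n",
           cpus, sockets, cores, threads);
}

int main(void)
{
    resolve_topology(8, 2, 0, 0); /* -smp 8,sockets=2: cores=4, threads=1 */
    resolve_topology(0, 0, 2, 2); /* -smp cores=2,threads=2: 4 CPUs, 1 socket */
    return 0;
}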
static void realtime_init(void)
{
if (enable_mlock) {
return NULL;
}
-MachineInfoList *qmp_query_machines(Error **errp)
-{
- GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false);
- MachineInfoList *mach_list = NULL;
-
- for (el = machines; el; el = el->next) {
- MachineClass *mc = el->data;
- MachineInfoList *entry;
- MachineInfo *info;
-
- info = g_malloc0(sizeof(*info));
- if (mc->is_default) {
- info->has_is_default = true;
- info->is_default = true;
- }
-
- if (mc->alias) {
- info->has_alias = true;
- info->alias = g_strdup(mc->alias);
- }
-
- info->name = g_strdup(mc->name);
- info->cpu_max = !mc->max_cpus ? 1 : mc->max_cpus;
- info->hotpluggable_cpus = mc->has_hotpluggable_cpus;
-
- entry = g_malloc0(sizeof(*entry));
- entry->value = info;
- entry->next = mach_list;
- mach_list = entry;
- }
-
- g_slist_free(machines);
- return mach_list;
-}
-
static int machine_help_func(QemuOpts *opts, MachineState *machine)
{
ObjectProperty *prop;
return 1;
}
-struct vm_change_state_entry {
+struct VMChangeStateEntry {
VMChangeStateHandler *cb;
void *opaque;
- QLIST_ENTRY (vm_change_state_entry) entries;
+ QTAILQ_ENTRY(VMChangeStateEntry) entries;
+ int priority;
};
-static QLIST_HEAD(, vm_change_state_entry) vm_change_state_head;
+static QTAILQ_HEAD(, VMChangeStateEntry) vm_change_state_head;
-VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
- void *opaque)
+/**
+ * qemu_add_vm_change_state_handler_prio:
+ * @cb: the callback to invoke
+ * @opaque: user data passed to the callback
+ * @priority: low priorities execute first when the vm starts running; the
+ * order is reversed when the vm stops
+ *
+ * Register a callback function that is invoked when the vm starts or stops
+ * running.
+ *
+ * Returns: an entry to be freed using qemu_del_vm_change_state_handler()
+ */
+VMChangeStateEntry *qemu_add_vm_change_state_handler_prio(
+ VMChangeStateHandler *cb, void *opaque, int priority)
{
VMChangeStateEntry *e;
+ VMChangeStateEntry *other;
- e = g_malloc0(sizeof (*e));
-
+ e = g_malloc0(sizeof(*e));
e->cb = cb;
e->opaque = opaque;
- QLIST_INSERT_HEAD(&vm_change_state_head, e, entries);
+ e->priority = priority;
+
+ /* Keep list sorted in ascending priority order */
+ QTAILQ_FOREACH(other, &vm_change_state_head, entries) {
+ if (priority < other->priority) {
+ QTAILQ_INSERT_BEFORE(other, e, entries);
+ return e;
+ }
+ }
+
+ QTAILQ_INSERT_TAIL(&vm_change_state_head, e, entries);
return e;
}
+VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
+ void *opaque)
+{
+ return qemu_add_vm_change_state_handler_prio(cb, opaque, 0);
+}
+
void qemu_del_vm_change_state_handler(VMChangeStateEntry *e)
{
- QLIST_REMOVE (e, entries);
- g_free (e);
+ QTAILQ_REMOVE(&vm_change_state_head, e, entries);
+ g_free(e);
}
void vm_state_notify(int running, RunState state)
trace_vm_state_notify(running, state, RunState_str(state));
- QLIST_FOREACH_SAFE(e, &vm_change_state_head, entries, next) {
- e->cb(e->opaque, running, state);
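+    /*
+     * Run handlers in ascending priority order when the vm starts and in
+     * descending order when it stops, matching the sorted list above.
+     */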
+ if (running) {
+ QTAILQ_FOREACH_SAFE(e, &vm_change_state_head, entries, next) {
+ e->cb(e->opaque, running, state);
+ }
+ } else {
+ QTAILQ_FOREACH_REVERSE_SAFE(e, &vm_change_state_head, entries, next) {
+ e->cb(e->opaque, running, state);
+ }
}
}
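Taken together, a caller elsewhere in the tree could use the new interface roughly as follows; this is a minimal sketch assuming the prototypes are visible via "sysemu/runstate.h" (included above), and early_handler, late_handler and the priority values are illustrative rather than taken from a real user:

#include "qemu/osdep.h"
#include "sysemu/runstate.h"

/* Illustrative handlers; with these priorities, early_handler runs first on
 * start and last on stop. */
static void early_handler(void *opaque, int running, RunState state)
{
}

static void late_handler(void *opaque, int running, RunState state)
{
}

static void register_example_handlers(void)
{
    qemu_add_vm_change_state_handler_prio(early_handler, NULL, 0);
    qemu_add_vm_change_state_handler_prio(late_handler, NULL, 10);
    /* The plain qemu_add_vm_change_state_handler() wrapper registers at
     * priority 0. */
}

Keeping the list sorted at insertion time keeps vm_state_notify() itself simple: a forward walk when the vm starts and a reverse walk when it stops.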
cpu_synchronize_all_states();
if (mc && mc->reset) {
- mc->reset();
+ mc->reset(current_machine);
} else {
qemu_devices_reset();
}
- if (reason != SHUTDOWN_CAUSE_SUBSYSTEM_RESET) {
+ if (reason && reason != SHUTDOWN_CAUSE_SUBSYSTEM_RESET) {
qapi_event_send_reset(shutdown_caused_by_guest(reason), reason);
}
cpu_synchronize_all_post_reset();
return wakeup_suspend_enabled;
}
-CurrentMachineParams *qmp_query_current_machine(Error **errp)
-{
- CurrentMachineParams *params = g_malloc0(sizeof(*params));
- params->wakeup_suspend_support = qemu_wakeup_suspend_enabled();
-
- return params;
-}
-
void qemu_system_killed(int signal, pid_t pid)
{
shutdown_signal = signal;
return -1;
}
+ if (!qmp && qemu_opt_get(opts, "pretty")) {
+ warn_report("'pretty' is deprecated for HMP monitors, it has no effect "
+ "and will be removed in future versions");
+ }
if (qemu_opt_get_bool(opts, "pretty", 0)) {
pretty = true;
}
opts = qemu_opts_create(qemu_find_opts("mon"), label, 1, &error_fatal);
qemu_opt_set(opts, "mode", mode, &error_abort);
qemu_opt_set(opts, "chardev", label, &error_abort);
- qemu_opt_set_bool(opts, "pretty", pretty, &error_abort);
+ if (!strcmp(mode, "control")) {
+ qemu_opt_set_bool(opts, "pretty", pretty, &error_abort);
+ } else {
+ assert(pretty == false);
+ }
monitor_device_index++;
}
char *dir, **dirs;
BlockdevOptionsQueue bdo_queue = QSIMPLEQ_HEAD_INITIALIZER(bdo_queue);
+ os_set_line_buffering();
+
error_init(argv[0]);
module_call_init(MODULE_INIT_TRACE);
exit(1);
}
- QLIST_INIT (&vm_change_state_head);
+ QTAILQ_INIT(&vm_change_state_head);
os_setup_early_signal_handling();
cpu_option = NULL;
machine_class->default_cpus = machine_class->default_cpus ?: 1;
/* default to machine_class->default_cpus */
- smp_cpus = machine_class->default_cpus;
- max_cpus = machine_class->default_cpus;
+ current_machine->smp.cpus = machine_class->default_cpus;
+ current_machine->smp.max_cpus = machine_class->default_cpus;
+ current_machine->smp.cores = 1;
+ current_machine->smp.threads = 1;
- smp_parse(qemu_opts_find(qemu_find_opts("smp-opts"), NULL));
+ machine_class->smp_parse(current_machine,
+ qemu_opts_find(qemu_find_opts("smp-opts"), NULL));
/* sanity-check smp_cpus and max_cpus against machine_class */
- if (smp_cpus < machine_class->min_cpus) {
+ if (current_machine->smp.cpus < machine_class->min_cpus) {
error_report("Invalid SMP CPUs %d. The min CPUs "
- "supported by machine '%s' is %d", smp_cpus,
+ "supported by machine '%s' is %d",
+ current_machine->smp.cpus,
machine_class->name, machine_class->min_cpus);
exit(1);
}
- if (max_cpus > machine_class->max_cpus) {
+ if (current_machine->smp.max_cpus > machine_class->max_cpus) {
error_report("Invalid SMP CPUs %d. The max CPUs "
- "supported by machine '%s' is %d", max_cpus,
+ "supported by machine '%s' is %d",
+ current_machine->smp.max_cpus,
machine_class->name, machine_class->max_cpus);
exit(1);
}
migration_object_init();
if (qtest_chrdev) {
- qtest_init(qtest_chrdev, qtest_log, &error_fatal);
+ qtest_server_init(qtest_chrdev, qtest_log, &error_fatal);
}
machine_opts = qemu_get_machine_opts();
semihosting_arg_fallback(kernel_filename, kernel_cmdline);
}
- os_set_line_buffering();
-
/* spice needs the timers to be initialized by this point */
qemu_spice_init();
*/
migration_shutdown();
+ /*
+ * We must cancel all block jobs while the block layer is drained,
+ * or cancelling will be affected by throttling and thus may block
+ * for an extended period of time.
+ * vm_shutdown() will bdrv_drain_all(), so we may as well include
+ * it in the drained section.
+ * We do not need to end this section, because we do not want any
+ * requests happening from here on anyway.
+ */
+ bdrv_drain_all_begin();
+
/* No more vcpu or device emulation activity beyond this point */
vm_shutdown();