*/
#include "qemu/osdep.h"
+#include "qemu/units.h"
#include <dirent.h>
#include "cpu.h"
#include "hw/hw.h"
#include "net/slirp.h"
#include "chardev/char-fe.h"
#include "chardev/char-io.h"
+#include "chardev/char-mux.h"
#include "ui/qemu-spice.h"
#include "sysemu/numa.h"
#include "monitor/monitor.h"
#include "qemu/readline.h"
#include "ui/console.h"
#include "ui/input.h"
-#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "audio/audio.h"
#include "disas/disas.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qmp/qstring.h"
#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/json-streamer.h"
#include "qapi/qmp/json-parser.h"
#include "qapi/qmp/qlist.h"
#include "qom/object_interfaces.h"
const char *args_type;
const char *params;
const char *help;
+ const char *flags; /* p=preconfig */
void (*cmd)(Monitor *mon, const QDict *qdict);
/* @sub_table is a list of 2nd level of commands. If it does not exist,
* cmd should be used. If it exists, sub_table[?].cmd should be
JSONMessageParser parser;
/*
* When a client connects, we're in capabilities negotiation mode.
- * When command qmp_capabilities succeeds, we go into command
- * mode.
+ * @commands is &qmp_cap_negotiation_commands then. When command
+ * qmp_capabilities succeeds, we go into command mode, and
+ * @commands becomes &qmp_commands.
*/
QmpCommandList *commands;
- bool qmp_caps[QMP_CAPABILITY__MAX];
+ bool capab_offered[QMP_CAPABILITY__MAX]; /* capabilities offered */
+ bool capab[QMP_CAPABILITY__MAX]; /* offered and accepted */
+ /*
+ * Protects qmp request/response queue.
+ * Take monitor_lock first when you need both.
+ */
+ QemuMutex qmp_queue_lock;
+ /* Input queue that holds all the parsed QMP requests */
+ GQueue *qmp_requests;
} MonitorQMP;
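+
+/*
+ * Rough sketch of the negotiation described above:
+ *   S: {"QMP": {"version": {...}, "capabilities": [...]}}
+ *   C: {"execute": "qmp_capabilities", "arguments": {"enable": ["oob"]}}
+ *   S: {"return": {}}
+ * Only then does @commands switch to &qmp_commands.
+ */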
/*
int flags;
int suspend_cnt; /* Needs to be accessed atomically */
bool skip_flush;
- bool use_io_thr;
-
- QemuMutex out_lock;
- QString *outbuf;
- guint out_watch;
-
- /* Read under either BQL or out_lock, written with BQL+out_lock. */
- int mux_out;
+ bool use_io_thread;
+ /*
+ * State used only in the thread "owning" the monitor.
+ * If @use_io_thread, this is @mon_iothread.
+ * Else, it's the main thread.
+ * These members can be safely accessed without locks.
+ */
ReadLineState *rs;
+
MonitorQMP qmp;
gchar *mon_cpu_path;
BlockCompletionFunc *password_completion_cb;
void *password_opaque;
mon_cmd_t *cmd_table;
- QLIST_HEAD(,mon_fd_t) fds;
QTAILQ_ENTRY(Monitor) entry;
+
+ /*
+ * The per-monitor lock. We can't access guest memory when holding
+ * the lock.
+ */
+ QemuMutex mon_lock;
+
+ /*
+ * Members that are protected by the per-monitor lock
+ */
+ QLIST_HEAD(, mon_fd_t) fds;
+ QString *outbuf;
+ guint out_watch;
+ /* Read under either BQL or mon_lock, written with BQL+mon_lock. */
+ int mux_out;
};
-/* Let's add monitor global variables to this struct. */
-static struct {
- IOThread *mon_iothread;
-} mon_global;
+/* Shared monitor I/O thread */
+IOThread *mon_iothread;
+
+/* Bottom half to dispatch the requests received from the I/O thread */
+QEMUBH *qmp_dispatcher_bh;
+
+struct QMPRequest {
+ /* Owner of the request */
+ Monitor *mon;
+ /* "id" field of the request */
+ QObject *id;
+ /*
+ * Request object to be handled or Error to be reported
+ * (exactly one of them is non-null)
+ */
+ QObject *req;
+ Error *err;
+};
+typedef struct QMPRequest QMPRequest;
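+
+/*
+ * Rough lifecycle: handle_qmp_command() allocates a QMPRequest and
+ * queues it on mon->qmp.qmp_requests; monitor_qmp_bh_dispatcher()
+ * pops it via monitor_qmp_requests_pop_any() (round-robin across
+ * monitors), dispatches it or reports its error, then frees it with
+ * qmp_request_free().
+ */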
/* QMP checker flags */
#define QMP_ACCEPT_UNKNOWNS 1
-/* Protects mon_list, monitor_event_state. */
+/* Protects mon_list, monitor_qapi_event_state. */
static QemuMutex monitor_lock;
-
+static GHashTable *monitor_qapi_event_state;
static QTAILQ_HEAD(mon_list, Monitor) mon_list;
+
+/* Protects mon_fdsets */
+static QemuMutex mon_fdsets_lock;
static QLIST_HEAD(mon_fdsets, MonFdset) mon_fdsets;
+
static int mon_refcount;
static mon_cmd_t mon_cmds[];
QmpCommandList qmp_commands, qmp_cap_negotiation_commands;
-Monitor *cur_mon;
-
-static QEMUClockType event_clock_type = QEMU_CLOCK_REALTIME;
+__thread Monitor *cur_mon;
static void monitor_command_cb(void *opaque, const char *cmdline,
void *readline_opaque);
return (mon->flags & MONITOR_USE_CONTROL);
}
+/**
+ * Is @mon using readline?
+ * Note: not all HMP monitors use readline, e.g., gdbserver has a
+ * non-interactive HMP monitor, so readline is not used there.
+ */
+static inline bool monitor_uses_readline(const Monitor *mon)
+{
+ return mon->flags & MONITOR_USE_READLINE;
+}
+
+static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
+{
+ return !monitor_is_qmp(mon) && !monitor_uses_readline(mon);
+}
+
+/*
+ * Return the clock to use for recording an event's time.
+ * It's QEMU_CLOCK_REALTIME, except for qtests it's
+ * QEMU_CLOCK_VIRTUAL, to support testing rate limits.
+ * Beware: result is invalid before configure_accelerator().
+ */
+static inline QEMUClockType monitor_get_event_clock(void)
+{
+ return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
+}
+
/**
* Is the current monitor, if any, a QMP monitor?
*/
}
}
+static void qmp_request_free(QMPRequest *req)
+{
+ qobject_unref(req->id);
+ qobject_unref(req->req);
+ error_free(req->err);
+ g_free(req);
+}
+
+/* Caller must hold mon->qmp.qmp_queue_lock */
+static void monitor_qmp_cleanup_req_queue_locked(Monitor *mon)
+{
+ while (!g_queue_is_empty(mon->qmp.qmp_requests)) {
+ qmp_request_free(g_queue_pop_head(mon->qmp.qmp_requests));
+ }
+}
+
+static void monitor_qmp_cleanup_queues(Monitor *mon)
+{
+ qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
+ monitor_qmp_cleanup_req_queue_locked(mon);
+ qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+}
+
static void monitor_flush_locked(Monitor *mon);
static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
{
Monitor *mon = opaque;
- qemu_mutex_lock(&mon->out_lock);
+ qemu_mutex_lock(&mon->mon_lock);
mon->out_watch = 0;
monitor_flush_locked(mon);
- qemu_mutex_unlock(&mon->out_lock);
+ qemu_mutex_unlock(&mon->mon_lock);
return FALSE;
}
-/* Called with mon->out_lock held. */
+/* Caller must hold mon->mon_lock */
static void monitor_flush_locked(Monitor *mon)
{
int rc;
rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
/* all flushed or error */
- QDECREF(mon->outbuf);
+ qobject_unref(mon->outbuf);
mon->outbuf = qstring_new();
return;
}
if (rc > 0) {
/* partial write */
QString *tmp = qstring_from_str(buf + rc);
- QDECREF(mon->outbuf);
+ qobject_unref(mon->outbuf);
mon->outbuf = tmp;
}
if (mon->out_watch == 0) {
void monitor_flush(Monitor *mon)
{
- qemu_mutex_lock(&mon->out_lock);
+ qemu_mutex_lock(&mon->mon_lock);
monitor_flush_locked(mon);
- qemu_mutex_unlock(&mon->out_lock);
+ qemu_mutex_unlock(&mon->mon_lock);
}
/* flush at every end of line */
{
char c;
- qemu_mutex_lock(&mon->out_lock);
+ qemu_mutex_lock(&mon->mon_lock);
for(;;) {
c = *str++;
if (c == '\0')
monitor_flush_locked(mon);
}
}
- qemu_mutex_unlock(&mon->out_lock);
+ qemu_mutex_unlock(&mon->mon_lock);
}
void monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
return 0;
}
-static void monitor_json_emitter(Monitor *mon, const QObject *data)
+static void qmp_send_response(Monitor *mon, const QDict *rsp)
{
+ const QObject *data = QOBJECT(rsp);
QString *json;
json = mon->flags & MONITOR_USE_PRETTY ? qobject_to_json_pretty(data) :
qstring_append_chr(json, '\n');
monitor_puts(mon, qstring_get_str(json));
- QDECREF(json);
+ qobject_unref(json);
}
static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
[QAPI_EVENT_VSERPORT_CHANGE] = { 1000 * SCALE_MS },
};
-GHashTable *monitor_qapi_event_state;
-
/*
- * Emits the event to every monitor instance, @event is only used for trace
- * Called with monitor_lock held.
+ * Broadcast an event to all monitors.
+ * @qdict is the event object. Its member "event" must match @event.
+ * Caller must hold monitor_lock.
*/
static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict)
{
QTAILQ_FOREACH(mon, &mon_list, entry) {
if (monitor_is_qmp(mon)
&& mon->qmp.commands != &qmp_cap_negotiation_commands) {
- monitor_json_emitter(mon, QOBJECT(qdict));
+ qmp_send_response(mon, qdict);
}
}
}
* applying any rate limiting if required.
*/
static void
-monitor_qapi_event_queue(QAPIEvent event, QDict *qdict, Error **errp)
+monitor_qapi_event_queue_no_reenter(QAPIEvent event, QDict *qdict)
{
MonitorQAPIEventConf *evconf;
MonitorQAPIEventState *evstate;
* last send. Store event for sending when timer fires,
* replacing a prior stored event if any.
*/
- QDECREF(evstate->qdict);
- evstate->qdict = qdict;
- QINCREF(evstate->qdict);
+ qobject_unref(evstate->qdict);
+ evstate->qdict = qobject_ref(qdict);
} else {
/*
* Last send was (at least) evconf->rate ns ago.
* monitor_qapi_event_handler() in evconf->rate ns. Any
* events arriving before then will be delayed until then.
*/
- int64_t now = qemu_clock_get_ns(event_clock_type);
+ int64_t now = qemu_clock_get_ns(monitor_get_event_clock());
monitor_qapi_event_emit(event, qdict);
evstate = g_new(MonitorQAPIEventState, 1);
evstate->event = event;
- evstate->data = data;
- QINCREF(evstate->data);
+ evstate->data = qobject_ref(data);
evstate->qdict = NULL;
- evstate->timer = timer_new_ns(event_clock_type,
+ evstate->timer = timer_new_ns(monitor_get_event_clock(),
monitor_qapi_event_handler,
evstate);
g_hash_table_add(monitor_qapi_event_state, evstate);
qemu_mutex_unlock(&monitor_lock);
}
+static void
+monitor_qapi_event_queue(QAPIEvent event, QDict *qdict)
+{
+ /*
+ * monitor_qapi_event_queue_no_reenter() is not reentrant: it
+ * would deadlock on monitor_lock. Work around by queueing
+ * events in thread-local storage.
+ * TODO: remove this, make it re-enter safe.
+ */
+ typedef struct MonitorQapiEvent {
+ QAPIEvent event;
+ QDict *qdict;
+ QSIMPLEQ_ENTRY(MonitorQapiEvent) entry;
+ } MonitorQapiEvent;
+ static __thread QSIMPLEQ_HEAD(, MonitorQapiEvent) event_queue;
+ static __thread bool reentered;
+ MonitorQapiEvent *ev;
+
+ if (!reentered) {
+ QSIMPLEQ_INIT(&event_queue);
+ }
+
+ ev = g_new(MonitorQapiEvent, 1);
+ ev->qdict = qobject_ref(qdict);
+ ev->event = event;
+ QSIMPLEQ_INSERT_TAIL(&event_queue, ev, entry);
+ if (reentered) {
+ return;
+ }
+
+ reentered = true;
+
+ while ((ev = QSIMPLEQ_FIRST(&event_queue)) != NULL) {
+ QSIMPLEQ_REMOVE_HEAD(&event_queue, entry);
+ monitor_qapi_event_queue_no_reenter(ev->event, ev->qdict);
+ qobject_unref(ev->qdict);
+ g_free(ev);
+ }
+
+ reentered = false;
+}
+
/*
* This function runs evconf->rate ns after sending a throttled
* event.
qemu_mutex_lock(&monitor_lock);
if (evstate->qdict) {
- int64_t now = qemu_clock_get_ns(event_clock_type);
+ int64_t now = qemu_clock_get_ns(monitor_get_event_clock());
monitor_qapi_event_emit(evstate->event, evstate->qdict);
- QDECREF(evstate->qdict);
+ qobject_unref(evstate->qdict);
evstate->qdict = NULL;
timer_mod_ns(evstate->timer, now + evconf->rate);
} else {
g_hash_table_remove(monitor_qapi_event_state, evstate);
- QDECREF(evstate->data);
+ qobject_unref(evstate->data);
timer_free(evstate->timer);
g_free(evstate);
}
static void monitor_qapi_event_init(void)
{
- if (qtest_enabled()) {
- event_clock_type = QEMU_CLOCK_VIRTUAL;
- }
-
monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
qapi_event_throttle_equal);
qmp_event_set_func_emit(monitor_qapi_event_queue);
static void handle_hmp_command(Monitor *mon, const char *cmdline);
static void monitor_data_init(Monitor *mon, bool skip_flush,
- bool use_io_thr)
+ bool use_io_thread)
{
memset(mon, 0, sizeof(Monitor));
- qemu_mutex_init(&mon->out_lock);
+ qemu_mutex_init(&mon->mon_lock);
+ qemu_mutex_init(&mon->qmp.qmp_queue_lock);
mon->outbuf = qstring_new();
/* Use *mon_cmds by default. */
mon->cmd_table = mon_cmds;
mon->skip_flush = skip_flush;
- mon->use_io_thr = use_io_thr;
+ mon->use_io_thread = use_io_thread;
+ mon->qmp.qmp_requests = g_queue_new();
}
static void monitor_data_destroy(Monitor *mon)
json_message_parser_destroy(&mon->qmp.parser);
}
readline_free(mon->rs);
- QDECREF(mon->outbuf);
- qemu_mutex_destroy(&mon->out_lock);
+ qobject_unref(mon->outbuf);
+ qemu_mutex_destroy(&mon->mon_lock);
+ qemu_mutex_destroy(&mon->qmp.qmp_queue_lock);
+ monitor_qmp_cleanup_req_queue_locked(mon);
+ g_queue_free(mon->qmp.qmp_requests);
}
char *qmp_human_monitor_command(const char *command_line, bool has_cpu_index,
handle_hmp_command(&hmp, command_line);
cur_mon = old_mon;
- qemu_mutex_lock(&hmp.out_lock);
+ qemu_mutex_lock(&hmp.mon_lock);
if (qstring_get_length(hmp.outbuf) > 0) {
output = g_strdup(qstring_get_str(hmp.outbuf));
} else {
output = g_strdup("");
}
- qemu_mutex_unlock(&hmp.out_lock);
+ qemu_mutex_unlock(&hmp.mon_lock);
out:
monitor_data_destroy(&hmp);
p = list;
for(;;) {
pstart = p;
- p = strchr(p, '|');
- if (!p)
- p = pstart + strlen(pstart);
+ p = qemu_strchrnul(p, '|');
if ((p - pstart) == len && !memcmp(pstart, name, len))
return 1;
if (*p == '\0')
return -1;
}
+/*
+ * Can command @cmd be executed in preconfig state?
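+ * A command opts in by having 'p' in its @flags, e.g. .flags = "p".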
+ */
+static bool cmd_can_preconfig(const mon_cmd_t *cmd)
+{
+ if (!cmd->flags) {
+ return false;
+ }
+
+ return strchr(cmd->flags, 'p');
+}
+
static void help_cmd_dump_one(Monitor *mon,
const mon_cmd_t *cmd,
char **prefix_args,
{
int i;
+ if (runstate_check(RUN_STATE_PRECONFIG) && !cmd_can_preconfig(cmd)) {
+ return;
+ }
+
for (i = 0; i < prefix_args_nb; i++) {
monitor_printf(mon, "%s ", prefix_args[i]);
}
char **args, int nb_args, int arg_index)
{
const mon_cmd_t *cmd;
+ size_t i;
/* No valid arg need to compare with, dump all in *cmds */
if (arg_index >= nb_args) {
/* Find one entry to dump */
for (cmd = cmds; cmd->name != NULL; cmd++) {
- if (compare_cmd(args[arg_index], cmd->name)) {
+ if (compare_cmd(args[arg_index], cmd->name) &&
+ ((!runstate_check(RUN_STATE_PRECONFIG) ||
+ cmd_can_preconfig(cmd)))) {
if (cmd->sub_table) {
/* continue with next arg */
help_cmd_dump(mon, cmd->sub_table,
} else {
help_cmd_dump_one(mon, cmd, args, arg_index);
}
- break;
+ return;
}
}
+
+ /* Command not found */
+ monitor_printf(mon, "unknown command: '");
+ for (i = 0; i <= arg_index; i++) {
+ monitor_printf(mon, "%s%s", args[i], i == arg_index ? "'\n" : " ");
+ }
}
static void help_cmd(Monitor *mon, const char *name)
*/
static void qmp_unregister_commands_hack(void)
{
-#ifndef CONFIG_SPICE
- qmp_unregister_command(&qmp_commands, "query-spice");
-#endif
#ifndef CONFIG_REPLICATION
qmp_unregister_command(&qmp_commands, "xen-set-replication");
qmp_unregister_command(&qmp_commands, "query-xen-replication-status");
qmp_init_marshal(&qmp_commands);
qmp_register_command(&qmp_commands, "query-qmp-schema",
- qmp_query_qmp_schema,
- QCO_NO_OPTIONS);
+ qmp_query_qmp_schema, QCO_ALLOW_PRECONFIG);
qmp_register_command(&qmp_commands, "device_add", qmp_device_add,
QCO_NO_OPTIONS);
qmp_register_command(&qmp_commands, "netdev_add", qmp_netdev_add,
QTAILQ_INIT(&qmp_cap_negotiation_commands);
qmp_register_command(&qmp_cap_negotiation_commands, "qmp_capabilities",
- qmp_marshal_qmp_capabilities, QCO_NO_OPTIONS);
+ qmp_marshal_qmp_capabilities, QCO_ALLOW_PRECONFIG);
+}
+
+static bool qmp_oob_enabled(Monitor *mon)
+{
+ return mon->qmp.capab[QMP_CAPABILITY_OOB];
+}
+
+static void monitor_qmp_caps_reset(Monitor *mon)
+{
+ memset(mon->qmp.capab_offered, 0, sizeof(mon->qmp.capab_offered));
+ memset(mon->qmp.capab, 0, sizeof(mon->qmp.capab));
+ mon->qmp.capab_offered[QMP_CAPABILITY_OOB] = mon->use_io_thread;
}
-static void qmp_caps_check(Monitor *mon, QMPCapabilityList *list,
- Error **errp)
+/*
+ * Accept QMP capabilities in @list for @mon.
+ * On success, set mon->qmp.capab[], and return true.
+ * On error, set @errp, and return false.
+ */
+static bool qmp_caps_accept(Monitor *mon, QMPCapabilityList *list,
+ Error **errp)
{
+ GString *unavailable = NULL;
+ bool capab[QMP_CAPABILITY__MAX];
+
+ memset(capab, 0, sizeof(capab));
+
for (; list; list = list->next) {
- assert(list->value < QMP_CAPABILITY__MAX);
- switch (list->value) {
- case QMP_CAPABILITY_OOB:
- if (!mon->use_io_thr) {
- /*
- * Out-Of-Band only works with monitors that are
- * running on dedicated IOThread.
- */
- error_setg(errp, "This monitor does not support "
- "Out-Of-Band (OOB)");
- return;
+ if (!mon->qmp.capab_offered[list->value]) {
+ if (!unavailable) {
+ unavailable = g_string_new(QMPCapability_str(list->value));
+ } else {
+ g_string_append_printf(unavailable, ", %s",
+ QMPCapability_str(list->value));
}
- break;
- default:
- break;
}
+ capab[list->value] = true;
}
-}
-/* This function should only be called after capabilities are checked. */
-static void qmp_caps_apply(Monitor *mon, QMPCapabilityList *list)
-{
- for (; list; list = list->next) {
- mon->qmp.qmp_caps[list->value] = true;
+ if (unavailable) {
+ error_setg(errp, "Capability %s not available", unavailable->str);
+ g_string_free(unavailable, true);
+ return false;
}
+
+ memcpy(mon->qmp.capab, capab, sizeof(capab));
+ return true;
}
void qmp_qmp_capabilities(bool has_enable, QMPCapabilityList *enable,
Error **errp)
{
- Error *local_err = NULL;
-
if (cur_mon->qmp.commands == &qmp_commands) {
error_set(errp, ERROR_CLASS_COMMAND_NOT_FOUND,
"Capabilities negotiation is already complete, command "
return;
}
- /* Enable QMP capabilities provided by the client if applicable. */
- if (has_enable) {
- qmp_caps_check(cur_mon, enable, &local_err);
- if (local_err) {
- /*
- * Failed check on any of the capabilities will fail the
- * entire command (and thus not apply any of the other
- * capabilities that were also requested).
- */
- error_propagate(errp, local_err);
- return;
- }
- qmp_caps_apply(cur_mon, enable);
+ if (!qmp_caps_accept(cur_mon, enable, errp)) {
+ return;
}
cur_mon->qmp.commands = &qmp_commands;
}
-/* set the current CPU defined by the user */
+/* Set the current CPU defined by the user. Callers must hold BQL. */
int monitor_set_cpu(int cpu_index)
{
CPUState *cpu;
return 0;
}
+/* Callers must hold BQL. */
static CPUState *mon_get_cpu_sync(bool synchronize)
{
CPUState *cpu;
}
#endif
+static void hmp_info_sync_profile(Monitor *mon, const QDict *qdict)
+{
+ int64_t max = qdict_get_try_int(qdict, "max", 10);
+ bool mean = qdict_get_try_bool(qdict, "mean", false);
+ bool coalesce = !qdict_get_try_bool(qdict, "no_coalesce", false);
+ enum QSPSortBy sort_by;
+
+ sort_by = mean ? QSP_SORT_BY_AVG_WAIT_TIME : QSP_SORT_BY_TOTAL_WAIT_TIME;
+ qsp_report((FILE *)mon, monitor_fprintf, max, sort_by, coalesce);
+}
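+
+/*
+ * HMP usage sketch: "info sync-profile", with optional flags (defined
+ * in hmp-commands-info.hx, not shown here) to sort by mean wait time
+ * or to disable coalescing, plus an optional max entry count
+ * (default 10, per qdict_get_try_int() above).
+ */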
+
static void hmp_info_history(Monitor *mon, const QDict *qdict)
{
int i;
{
bool flatview = qdict_get_try_bool(qdict, "flatview", false);
bool dispatch_tree = qdict_get_try_bool(qdict, "dispatch_tree", false);
+ bool owner = qdict_get_try_bool(qdict, "owner", false);
- mtree_info((fprintf_function)monitor_printf, mon, flatview, dispatch_tree);
+ mtree_info((fprintf_function)monitor_printf, mon, flatview, dispatch_tree,
+ owner);
}
static void hmp_info_numa(Monitor *mon, const QDict *qdict)
void qmp_getfd(const char *fdname, Error **errp)
{
mon_fd_t *monfd;
- int fd;
+ int fd, tmp_fd;
fd = qemu_chr_fe_get_msgfd(&cur_mon->chr);
if (fd == -1) {
return;
}
+ qemu_mutex_lock(&cur_mon->mon_lock);
QLIST_FOREACH(monfd, &cur_mon->fds, next) {
if (strcmp(monfd->name, fdname) != 0) {
continue;
}
- close(monfd->fd);
+ tmp_fd = monfd->fd;
monfd->fd = fd;
+ qemu_mutex_unlock(&cur_mon->mon_lock);
+ /* Make sure close() is outside critical section */
+ close(tmp_fd);
return;
}
monfd->fd = fd;
QLIST_INSERT_HEAD(&cur_mon->fds, monfd, next);
+ qemu_mutex_unlock(&cur_mon->mon_lock);
}
void qmp_closefd(const char *fdname, Error **errp)
{
mon_fd_t *monfd;
+ int tmp_fd;
+ qemu_mutex_lock(&cur_mon->mon_lock);
QLIST_FOREACH(monfd, &cur_mon->fds, next) {
if (strcmp(monfd->name, fdname) != 0) {
continue;
}
QLIST_REMOVE(monfd, next);
- close(monfd->fd);
+ tmp_fd = monfd->fd;
g_free(monfd->name);
g_free(monfd);
+ qemu_mutex_unlock(&cur_mon->mon_lock);
+ /* Make sure close() is outside critical section */
+ close(tmp_fd);
return;
}
+ qemu_mutex_unlock(&cur_mon->mon_lock);
error_setg(errp, QERR_FD_NOT_FOUND, fdname);
}
{
mon_fd_t *monfd;
+ qemu_mutex_lock(&mon->mon_lock);
QLIST_FOREACH(monfd, &mon->fds, next) {
int fd;
QLIST_REMOVE(monfd, next);
g_free(monfd->name);
g_free(monfd);
+ qemu_mutex_unlock(&mon->mon_lock);
return fd;
}
+ qemu_mutex_unlock(&mon->mon_lock);
error_setg(errp, "File descriptor named '%s' has not been found", fdname);
return -1;
}
MonFdset *mon_fdset;
MonFdset *mon_fdset_next;
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH_SAFE(mon_fdset, &mon_fdsets, next, mon_fdset_next) {
monitor_fdset_cleanup(mon_fdset);
}
+ qemu_mutex_unlock(&mon_fdsets_lock);
}
AddfdInfo *qmp_add_fd(bool has_fdset_id, int64_t fdset_id, bool has_opaque,
MonFdsetFd *mon_fdset_fd;
char fd_str[60];
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
if (mon_fdset->id != fdset_id) {
continue;
goto error;
}
monitor_fdset_cleanup(mon_fdset);
+ qemu_mutex_unlock(&mon_fdsets_lock);
return;
}
error:
+ qemu_mutex_unlock(&mon_fdsets_lock);
if (has_fd) {
snprintf(fd_str, sizeof(fd_str), "fdset-id:%" PRId64 ", fd:%" PRId64,
fdset_id, fd);
MonFdsetFd *mon_fdset_fd;
FdsetInfoList *fdset_list = NULL;
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
FdsetInfoList *fdset_info = g_malloc0(sizeof(*fdset_info));
FdsetFdInfoList *fdsetfd_list = NULL;
fdset_info->next = fdset_list;
fdset_list = fdset_info;
}
+ qemu_mutex_unlock(&mon_fdsets_lock);
return fdset_list;
}
MonFdsetFd *mon_fdset_fd;
AddfdInfo *fdinfo;
+ qemu_mutex_lock(&mon_fdsets_lock);
if (has_fdset_id) {
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
/* Break if match found or match impossible due to ordering by ID */
if (fdset_id < 0) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdset-id",
"a non-negative value");
+ qemu_mutex_unlock(&mon_fdsets_lock);
return NULL;
}
/* Use specified fdset ID */
fdinfo->fdset_id = mon_fdset->id;
fdinfo->fd = mon_fdset_fd->fd;
+ qemu_mutex_unlock(&mon_fdsets_lock);
return fdinfo;
}
int monitor_fdset_get_fd(int64_t fdset_id, int flags)
{
-#ifndef _WIN32
+#ifdef _WIN32
+ return -ENOENT;
+#else
MonFdset *mon_fdset;
MonFdsetFd *mon_fdset_fd;
int mon_fd_flags;
+ int ret;
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
if (mon_fdset->id != fdset_id) {
continue;
QLIST_FOREACH(mon_fdset_fd, &mon_fdset->fds, next) {
mon_fd_flags = fcntl(mon_fdset_fd->fd, F_GETFL);
if (mon_fd_flags == -1) {
- return -1;
+ ret = -errno;
+ goto out;
}
if ((flags & O_ACCMODE) == (mon_fd_flags & O_ACCMODE)) {
- return mon_fdset_fd->fd;
+ ret = mon_fdset_fd->fd;
+ goto out;
}
}
- errno = EACCES;
- return -1;
+ ret = -EACCES;
+ goto out;
}
-#endif
+ ret = -ENOENT;
- errno = ENOENT;
- return -1;
+out:
+ qemu_mutex_unlock(&mon_fdsets_lock);
+ return ret;
+#endif
}
int monitor_fdset_dup_fd_add(int64_t fdset_id, int dup_fd)
MonFdset *mon_fdset;
MonFdsetFd *mon_fdset_fd_dup;
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
if (mon_fdset->id != fdset_id) {
continue;
}
QLIST_FOREACH(mon_fdset_fd_dup, &mon_fdset->dup_fds, next) {
if (mon_fdset_fd_dup->fd == dup_fd) {
- return -1;
+ goto err;
}
}
mon_fdset_fd_dup = g_malloc0(sizeof(*mon_fdset_fd_dup));
mon_fdset_fd_dup->fd = dup_fd;
QLIST_INSERT_HEAD(&mon_fdset->dup_fds, mon_fdset_fd_dup, next);
+ qemu_mutex_unlock(&mon_fdsets_lock);
return 0;
}
+
+err:
+ qemu_mutex_unlock(&mon_fdsets_lock);
return -1;
}
MonFdset *mon_fdset;
MonFdsetFd *mon_fdset_fd_dup;
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
QLIST_FOREACH(mon_fdset_fd_dup, &mon_fdset->dup_fds, next) {
if (mon_fdset_fd_dup->fd == dup_fd) {
if (QLIST_EMPTY(&mon_fdset->dup_fds)) {
monitor_fdset_cleanup(mon_fdset);
}
- return -1;
+ goto err;
} else {
+ qemu_mutex_unlock(&mon_fdsets_lock);
return mon_fdset->id;
}
}
}
}
+
+err:
+ qemu_mutex_unlock(&mon_fdsets_lock);
return -1;
}
(int)(p - cmdp_start), cmdp_start);
return NULL;
}
+ if (runstate_check(RUN_STATE_PRECONFIG) && !cmd_can_preconfig(cmd)) {
+ monitor_printf(mon, "Command '%.*s' not available with -preconfig "
+ "until after exit_preconfig.\n",
+ (int)(p - cmdp_start), cmdp_start);
+ return NULL;
+ }
/* filter out following useless space */
while (qemu_isspace(*p)) {
monitor_printf(mon, "enter a positive value\n");
goto fail;
}
- val <<= 20;
+ val *= MiB;
}
qdict_put_int(qdict, key, val);
}
return qdict;
fail:
- QDECREF(qdict);
+ qobject_unref(qdict);
g_free(key);
return NULL;
}
{
QDict *qdict;
const mon_cmd_t *cmd;
+ const char *cmd_start = cmdline;
trace_handle_hmp_command(mon, cmdline);
qdict = monitor_parse_arguments(mon, &cmdline, cmd);
if (!qdict) {
- monitor_printf(mon, "Try \"help %s\" for more information\n",
- cmd->name);
+ while (cmdline > cmd_start && qemu_isspace(cmdline[-1])) {
+ cmdline--;
+ }
+ monitor_printf(mon, "Try \"help %.*s\" for more information\n",
+ (int)(cmdline - cmd_start), cmd_start);
return;
}
cmd->cmd(mon, qdict);
- QDECREF(qdict);
+ qobject_unref(qdict);
}
static void cmd_completion(Monitor *mon, const char *name, const char *list)
p = list;
for(;;) {
pstart = p;
- p = strchr(p, '|');
- if (!p)
- p = pstart + strlen(pstart);
+ p = qemu_strchrnul(p, '|');
len = p - pstart;
if (len > sizeof(cmd) - 2)
len = sizeof(cmd) - 2;
cmdname = args[0];
readline_set_completion_index(mon->rs, strlen(cmdname));
for (cmd = cmd_table; cmd->name != NULL; cmd++) {
- cmd_completion(mon, cmdname, cmd->name);
+ if (!runstate_check(RUN_STATE_PRECONFIG) ||
+ cmd_can_preconfig(cmd)) {
+ cmd_completion(mon, cmdname, cmd->name);
+ }
}
} else {
/* find the command */
for (cmd = cmd_table; cmd->name != NULL; cmd++) {
- if (compare_cmd(args[0], cmd->name)) {
+ if (compare_cmd(args[0], cmd->name) &&
+ (!runstate_check(RUN_STATE_PRECONFIG) ||
+ cmd_can_preconfig(cmd))) {
break;
}
}
}
/*
- * 1. This function takes ownership of rsp, err, and id.
- * 2. rsp, err, and id may be NULL.
- * 3. If err != NULL then rsp must be NULL.
+ * Emit QMP response @rsp with ID @id to @mon.
+ * Null @rsp can only happen for commands with QCO_NO_SUCCESS_RESP.
+ * Nothing is emitted then.
*/
-static void monitor_qmp_respond(Monitor *mon, QObject *rsp,
- Error *err, QObject *id)
+static void monitor_qmp_respond(Monitor *mon, QDict *rsp, QObject *id)
{
- QDict *qdict = NULL;
+ if (rsp) {
+ if (id) {
+ qdict_put_obj(rsp, "id", qobject_ref(id));
+ }
- if (err) {
- assert(!rsp);
- qdict = qdict_new();
- qdict_put_obj(qdict, "error", qmp_build_error_object(err));
- error_free(err);
- rsp = QOBJECT(qdict);
+ qmp_send_response(mon, rsp);
}
+}
- if (rsp) {
- if (id) {
- /* This is for the qdict below. */
- qobject_incref(id);
- qdict_put_obj(qobject_to(QDict, rsp), "id", id);
+static void monitor_qmp_dispatch(Monitor *mon, QObject *req, QObject *id)
+{
+ Monitor *old_mon;
+ QDict *rsp;
+ QDict *error;
+
+ old_mon = cur_mon;
+ cur_mon = mon;
+
+ rsp = qmp_dispatch(mon->qmp.commands, req, qmp_oob_enabled(mon));
+
+ cur_mon = old_mon;
+
+ if (mon->qmp.commands == &qmp_cap_negotiation_commands) {
+ error = qdict_get_qdict(rsp, "error");
+ if (error
+ && !g_strcmp0(qdict_get_try_str(error, "class"),
+ QapiErrorClass_str(ERROR_CLASS_COMMAND_NOT_FOUND))) {
+ /* Provide a more useful error message */
+ qdict_del(error, "desc");
+ qdict_put_str(error, "desc", "Expecting capabilities negotiation"
+ " with 'qmp_capabilities'");
+ }
+ }
+
+ monitor_qmp_respond(mon, rsp, id);
+ qobject_unref(rsp);
+}
+
+/*
+ * Pop a QMP request from a monitor request queue.
+ * Return the request, or NULL if all request queues are empty.
+ * We pop requests in round-robin fashion, to avoid serving only a
+ * very busy monitor.  To achieve that, once we have processed a
+ * request from a monitor, we move that monitor to the end of
+ * mon_list.
+ */
+static QMPRequest *monitor_qmp_requests_pop_any(void)
+{
+ QMPRequest *req_obj = NULL;
+ Monitor *mon;
+
+ qemu_mutex_lock(&monitor_lock);
+
+ QTAILQ_FOREACH(mon, &mon_list, entry) {
+ qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
+ req_obj = g_queue_pop_head(mon->qmp.qmp_requests);
+ qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+ if (req_obj) {
+ break;
}
+ }
- monitor_json_emitter(mon, rsp);
+ if (req_obj) {
+ /*
+         * We found one request on this monitor.  Degrade its priority
+         * to lowest by re-inserting it at the end of mon_list.
+ */
+ QTAILQ_REMOVE(&mon_list, mon, entry);
+ QTAILQ_INSERT_TAIL(&mon_list, mon, entry);
}
- qobject_decref(id);
- qobject_decref(rsp);
+ qemu_mutex_unlock(&monitor_lock);
+
+ return req_obj;
}
-static void handle_qmp_command(JSONMessageParser *parser, GQueue *tokens)
+static void monitor_qmp_bh_dispatcher(void *data)
{
- QObject *req, *rsp = NULL, *id = NULL;
- QDict *qdict = NULL;
- MonitorQMP *mon_qmp = container_of(parser, MonitorQMP, parser);
- Monitor *old_mon, *mon = container_of(mon_qmp, Monitor, qmp);
+ QMPRequest *req_obj = monitor_qmp_requests_pop_any();
+ QDict *rsp;
+ bool need_resume;
- Error *err = NULL;
+ if (!req_obj) {
+ return;
+ }
- req = json_parser_parse_err(tokens, NULL, &err);
- if (!req && !err) {
- /* json_parser_parse_err() sucks: can fail without setting @err */
- error_setg(&err, QERR_JSON_PARSING);
+ /* qmp_oob_enabled() might change after "qmp_capabilities" */
+ need_resume = !qmp_oob_enabled(req_obj->mon);
+ if (req_obj->req) {
+ trace_monitor_qmp_cmd_in_band(qobject_get_try_str(req_obj->id) ?: "");
+ monitor_qmp_dispatch(req_obj->mon, req_obj->req, req_obj->id);
+ } else {
+ assert(req_obj->err);
+ rsp = qmp_error_response(req_obj->err);
+ req_obj->err = NULL;
+ monitor_qmp_respond(req_obj->mon, rsp, NULL);
+ qobject_unref(rsp);
}
- if (err) {
- goto err_out;
+
+ if (need_resume) {
+ /* Pairs with the monitor_suspend() in handle_qmp_command() */
+ monitor_resume(req_obj->mon);
}
+ qmp_request_free(req_obj);
+
+ /* Reschedule instead of looping so the main loop stays responsive */
+ qemu_bh_schedule(qmp_dispatcher_bh);
+}
+
+#define QMP_REQ_QUEUE_LEN_MAX (8)
+
+static void handle_qmp_command(void *opaque, QObject *req, Error *err)
+{
+ Monitor *mon = opaque;
+ QObject *id = NULL;
+ QDict *qdict;
+ QMPRequest *req_obj;
+
+ assert(!req != !err);
qdict = qobject_to(QDict, req);
if (qdict) {
- id = qdict_get(qdict, "id");
- qobject_incref(id);
+ id = qobject_ref(qdict_get(qdict, "id"));
qdict_del(qdict, "id");
} /* else will fail qmp_dispatch() */
- if (trace_event_get_state_backends(TRACE_HANDLE_QMP_COMMAND)) {
+ if (req && trace_event_get_state_backends(TRACE_HANDLE_QMP_COMMAND)) {
QString *req_json = qobject_to_json(req);
trace_handle_qmp_command(mon, qstring_get_str(req_json));
- QDECREF(req_json);
+ qobject_unref(req_json);
}
- old_mon = cur_mon;
- cur_mon = mon;
+ if (qdict && qmp_is_oob(qdict)) {
+ /* OOB commands are executed immediately */
+ trace_monitor_qmp_cmd_out_of_band(qobject_get_try_str(id)
+ ?: "");
+ monitor_qmp_dispatch(mon, req, id);
+ qobject_unref(req);
+ qobject_unref(id);
+ return;
+ }
- rsp = qmp_dispatch(cur_mon->qmp.commands, req);
+ req_obj = g_new0(QMPRequest, 1);
+ req_obj->mon = mon;
+ req_obj->id = id;
+ req_obj->req = req;
+ req_obj->err = err;
- cur_mon = old_mon;
+ /* Protect qmp_requests and fetching its length. */
+ qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
- if (mon->qmp.commands == &qmp_cap_negotiation_commands) {
- qdict = qdict_get_qdict(qobject_to(QDict, rsp), "error");
- if (qdict
- && !g_strcmp0(qdict_get_try_str(qdict, "class"),
- QapiErrorClass_str(ERROR_CLASS_COMMAND_NOT_FOUND))) {
- /* Provide a more useful error message */
- qdict_del(qdict, "desc");
- qdict_put_str(qdict, "desc", "Expecting capabilities negotiation"
- " with 'qmp_capabilities'");
+ /*
+     * If OOB is not enabled on the current monitor, we'll emulate the
+     * old behavior: suspend the monitor until the current command has
+     * been responded to.  This ensures that, as long as OOB is off,
+     * the server never drops a command.
+ */
+ if (!qmp_oob_enabled(mon)) {
+ monitor_suspend(mon);
+ } else {
+ /* Drop the request if queue is full. */
+ if (mon->qmp.qmp_requests->length >= QMP_REQ_QUEUE_LEN_MAX) {
+ qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+ /*
+ * FIXME @id's scope is just @mon, and broadcasting it is
+ * wrong. If another monitor's client has a command with
+ * the same ID in flight, the event will incorrectly claim
+ * that command was dropped.
+ */
+ qapi_event_send_command_dropped(id,
+ COMMAND_DROP_REASON_QUEUE_FULL);
+ qmp_request_free(req_obj);
+ return;
}
}
-err_out:
- monitor_qmp_respond(mon, rsp, err, id);
+ /*
+     * Put the request at the end of the queue so that requests are
+     * handled in the order they arrived.  Ownership of req_obj, req,
+     * id, etc. passes to the dispatcher.
+ */
+ g_queue_push_tail(mon->qmp.qmp_requests, req_obj);
+ qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
- qobject_decref(req);
+ /* Kick the dispatcher routine */
+ qemu_bh_schedule(qmp_dispatcher_bh);
}
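+
+/*
+ * Sketch of the COMMAND_DROPPED event sent above when the queue
+ * overflows (exact field layout per the QAPI schema):
+ *   {"event": "COMMAND_DROPPED",
+ *    "data": {"id": ..., "reason": "queue-full"},
+ *    "timestamp": {...}}
+ */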
static void monitor_qmp_read(void *opaque, const uint8_t *buf, int size)
int monitor_suspend(Monitor *mon)
{
- if (!mon->rs)
+ if (monitor_is_hmp_non_interactive(mon)) {
return -ENOTTY;
+ }
+
atomic_inc(&mon->suspend_cnt);
+
+ if (monitor_is_qmp(mon)) {
+ /*
+ * Kick I/O thread to make sure this takes effect. It'll be
+ * evaluated again in prepare() of the watch object.
+ */
+ aio_notify(iothread_get_aio_context(mon_iothread));
+ }
+
+ trace_monitor_suspend(mon, 1);
return 0;
}
void monitor_resume(Monitor *mon)
{
- if (!mon->rs)
+ if (monitor_is_hmp_non_interactive(mon)) {
return;
+ }
+
if (atomic_dec_fetch(&mon->suspend_cnt) == 0) {
- readline_show_prompt(mon->rs);
+ if (monitor_is_qmp(mon)) {
+ /*
+ * For QMP monitors that are running in the I/O thread,
+ * let's kick the thread in case it's sleeping.
+ */
+ if (mon->use_io_thread) {
+ aio_notify(iothread_get_aio_context(mon_iothread));
+ }
+ } else {
+ assert(mon->rs);
+ readline_show_prompt(mon->rs);
+ }
+ qemu_chr_fe_accept_input(&mon->chr);
}
+ trace_monitor_suspend(mon, -1);
}
-static QObject *get_qmp_greeting(Monitor *mon)
+static QDict *qmp_greeting(Monitor *mon)
{
QList *cap_list = qlist_new();
QObject *ver = NULL;
qmp_marshal_query_version(NULL, &ver, NULL);
for (cap = 0; cap < QMP_CAPABILITY__MAX; cap++) {
- if (!mon->use_io_thr && cap == QMP_CAPABILITY_OOB) {
- /* Monitors that are not using IOThread won't support OOB */
- continue;
+ if (mon->qmp.capab_offered[cap]) {
+ qlist_append_str(cap_list, QMPCapability_str(cap));
}
- qlist_append(cap_list, qstring_from_str(QMPCapability_str(cap)));
}
- return qobject_from_jsonf("{'QMP': {'version': %p, 'capabilities': %p}}",
- ver, cap_list);
-}
-
-static void monitor_qmp_caps_reset(Monitor *mon)
-{
- memset(mon->qmp.qmp_caps, 0, sizeof(mon->qmp.qmp_caps));
+ return qdict_from_jsonf_nofail(
+ "{'QMP': {'version': %p, 'capabilities': %p}}",
+ ver, cap_list);
}
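+
+/*
+ * Resulting greeting, roughly (version contents elided):
+ *   {"QMP": {"version": {"qemu": {...}, "package": "..."},
+ *            "capabilities": ["oob"]}}
+ * "oob" is listed only when it was offered, i.e. when the monitor
+ * uses the dedicated I/O thread.
+ */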
static void monitor_qmp_event(void *opaque, int event)
{
- QObject *data;
+ QDict *data;
Monitor *mon = opaque;
switch (event) {
case CHR_EVENT_OPENED:
mon->qmp.commands = &qmp_cap_negotiation_commands;
monitor_qmp_caps_reset(mon);
- data = get_qmp_greeting(mon);
- monitor_json_emitter(mon, data);
- qobject_decref(data);
+ data = qmp_greeting(mon);
+ qmp_send_response(mon, data);
+ qobject_unref(data);
mon_refcount++;
break;
case CHR_EVENT_CLOSED:
+ /*
+ * Note: this is only useful when the output of the chardev
+ * backend is still open. For example, when the backend is
+ * stdio, it's possible that stdout is still open when stdin
+ * is closed.
+ */
+ monitor_qmp_cleanup_queues(mon);
json_message_parser_destroy(&mon->qmp.parser);
- json_message_parser_init(&mon->qmp.parser, handle_qmp_command);
+ json_message_parser_init(&mon->qmp.parser, handle_qmp_command,
+ mon, NULL);
mon_refcount--;
monitor_fdsets_cleanup();
break;
switch (event) {
case CHR_EVENT_MUX_IN:
- qemu_mutex_lock(&mon->out_lock);
+ qemu_mutex_lock(&mon->mon_lock);
mon->mux_out = 0;
- qemu_mutex_unlock(&mon->out_lock);
+ qemu_mutex_unlock(&mon->mon_lock);
if (mon->reset_seen) {
readline_restart(mon->rs);
monitor_resume(mon);
} else {
atomic_inc(&mon->suspend_cnt);
}
- qemu_mutex_lock(&mon->out_lock);
+ qemu_mutex_lock(&mon->mon_lock);
mon->mux_out = 1;
- qemu_mutex_unlock(&mon->out_lock);
+ qemu_mutex_unlock(&mon->mon_lock);
break;
case CHR_EVENT_OPENED:
static GMainContext *monitor_get_io_context(void)
{
- return iothread_get_g_main_context(mon_global.mon_iothread);
+ return iothread_get_g_main_context(mon_iothread);
}
static AioContext *monitor_get_aio_context(void)
{
- return iothread_get_aio_context(mon_global.mon_iothread);
+ return iothread_get_aio_context(mon_iothread);
}
static void monitor_iothread_init(void)
{
- mon_global.mon_iothread = iothread_create("mon_iothread",
- &error_abort);
+ mon_iothread = iothread_create("mon_iothread", &error_abort);
+
+ /*
+ * The dispatcher BH must run in the main loop thread, since we
+ * have commands assuming that context. It would be nice to get
+ * rid of those assumptions.
+ */
+ qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
+ monitor_qmp_bh_dispatcher,
+ NULL);
}
void monitor_init_globals(void)
monitor_qapi_event_init();
sortcmdlist();
qemu_mutex_init(&monitor_lock);
+ qemu_mutex_init(&mon_fdsets_lock);
monitor_iothread_init();
}
}
/*
- * Print to current monitor if we have one, else to stderr.
+ * Print to current monitor if we have one, else to stream.
* TODO should return int, so callers can calculate width, but that
* requires surgery to monitor_vprintf(). Left for another day.
*/
-void error_vprintf(const char *fmt, va_list ap)
+void monitor_vfprintf(FILE *stream, const char *fmt, va_list ap)
{
if (cur_mon && !monitor_cur_is_qmp()) {
monitor_vprintf(cur_mon, fmt, ap);
} else {
- vfprintf(stderr, fmt, ap);
+ vfprintf(stream, fmt, ap);
}
}
+/*
+ * Print to current monitor if we have one, else to stderr.
+ * TODO should return int, so callers can calculate width, but that
+ * requires surgery to monitor_vprintf(). Left for another day.
+ */
+void error_vprintf(const char *fmt, va_list ap)
+{
+ monitor_vfprintf(stderr, fmt, ap);
+}
+
void error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
if (cur_mon && !monitor_cur_is_qmp()) {
Monitor *mon = opaque;
GMainContext *context;
- if (mon->use_io_thr) {
- /*
- * When use_io_thr is set, we use the global shared dedicated
- * IO thread for this monitor to handle input/output.
- */
- context = monitor_get_io_context();
- /* We should have inited globals before reaching here. */
- assert(context);
- } else {
- /* The default main loop, which is the main thread */
- context = NULL;
- }
-
+ assert(mon->use_io_thread);
+ context = monitor_get_io_context();
+ assert(context);
qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_qmp_read,
monitor_qmp_event, NULL, mon, context, true);
monitor_list_append(mon);
void monitor_init(Chardev *chr, int flags)
{
Monitor *mon = g_malloc(sizeof(*mon));
+ bool use_readline = flags & MONITOR_USE_READLINE;
+ bool use_oob = flags & MONITOR_USE_OOB;
+
+ if (use_oob) {
+ if (CHARDEV_IS_MUX(chr)) {
+ error_report("Monitor out-of-band is not supported with "
+ "MUX typed chardev backend");
+ exit(1);
+ }
+ if (use_readline) {
+ error_report("Monitor out-of-band is only supported by QMP");
+ exit(1);
+ }
+ }
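+
+    /*
+     * Illustrative command line that requests OOB (the x-oob option
+     * is added to qemu_mon_opts below):
+     *   -chardev socket,id=mon1,path=/tmp/qmp.sock,server,nowait
+     *   -mon chardev=mon1,mode=control,x-oob=on
+     */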
- monitor_data_init(mon, false, false);
+ monitor_data_init(mon, false, use_oob);
qemu_chr_fe_init(&mon->chr, chr, &error_abort);
mon->flags = flags;
- if (flags & MONITOR_USE_READLINE) {
+ if (use_readline) {
mon->rs = readline_init(monitor_readline_printf,
monitor_readline_flush,
mon,
if (monitor_is_qmp(mon)) {
qemu_chr_fe_set_echo(&mon->chr, true);
- json_message_parser_init(&mon->qmp.parser, handle_qmp_command);
- if (mon->use_io_thr) {
+ json_message_parser_init(&mon->qmp.parser, handle_qmp_command,
+ mon, NULL);
+ if (mon->use_io_thread) {
/*
* Make sure the old iowatch is gone. It's possible when
* e.g. the chardev is in client mode, with wait=on.
remove_fd_in_watch(chr);
/*
* We can't call qemu_chr_fe_set_handlers() directly here
- * since during the procedure the chardev will be active
- * and running in monitor iothread, while we'll still do
- * something before returning from it, which is a possible
- * race too. To avoid that, we just create a BH to setup
- * the handlers.
+ * since chardev might be running in the monitor I/O
+ * thread. Schedule a bottom half.
*/
aio_bh_schedule_oneshot(monitor_get_aio_context(),
monitor_qmp_setup_handlers_bh, mon);
- /* We'll add this to mon_list in the BH when setup done */
+ /* The bottom half will add @mon to @mon_list */
return;
} else {
qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read,
Monitor *mon, *next;
/*
- * We need to explicitly stop the iothread (but not destroy it),
- * cleanup the monitor resources, then destroy the iothread since
+ * We need to explicitly stop the I/O thread (but not destroy it),
+ * clean up the monitor resources, then destroy the I/O thread since
* we need to unregister from chardev below in
* monitor_data_destroy(), and chardev is not thread-safe yet
*/
- iothread_stop(mon_global.mon_iothread);
+ iothread_stop(mon_iothread);
+ /* Flush output buffers and destroy monitors */
qemu_mutex_lock(&monitor_lock);
QTAILQ_FOREACH_SAFE(mon, &mon_list, entry, next) {
QTAILQ_REMOVE(&mon_list, mon, entry);
+ monitor_flush(mon);
monitor_data_destroy(mon);
g_free(mon);
}
qemu_mutex_unlock(&monitor_lock);
- iothread_destroy(mon_global.mon_iothread);
- mon_global.mon_iothread = NULL;
+    /* QEMUBHs need to be deleted before destroying the I/O thread */
+ qemu_bh_delete(qmp_dispatcher_bh);
+ qmp_dispatcher_bh = NULL;
+
+ iothread_destroy(mon_iothread);
+ mon_iothread = NULL;
}
QemuOptsList qemu_mon_opts = {
},{
.name = "pretty",
.type = QEMU_OPT_BOOL,
+ },{
+ .name = "x-oob",
+ .type = QEMU_OPT_BOOL,
},
{ /* end of list */ }
},