/* The main loop's AioContext; NULL until the main loop has been set up
 * (callers such as qemu_notify_event() check for that explicitly). */
static AioContext *qemu_aio_context;

/* Return the main loop's AioContext (may be NULL before initialization). */
AioContext *qemu_get_aio_context(void)
{
    return qemu_aio_context;
}
+
void qemu_notify_event(void)
{
if (!qemu_aio_context) {
return 0;
}
/* Legacy select(2)-style fd state.  These mirror the gpollfds array and
 * exist only while callers still use the rfds/wfds/xfds interface. */
static fd_set rfds, wfds, xfds;
static int nfds;         /* highest fd number present in the fd_sets */
static int max_priority; /* filled in by g_main_context_prepare() */

/* Load rfds/wfds/xfds into gpollfds. Will be removed a few commits later.
 *
 * For every fd up to and including nfds, translate its membership in the
 * legacy fd_sets into a GPollFD event mask and append an entry to the
 * global gpollfds array:
 *   rfds -> G_IO_IN | G_IO_HUP | G_IO_ERR
 *   wfds -> G_IO_OUT | G_IO_ERR
 *   xfds -> G_IO_PRI
 * fds with no bits set in any fd_set produce no entry.
 */
static void gpollfds_from_select(void)
{
    int fd;
    for (fd = 0; fd <= nfds; fd++) {
        int events = 0;
        if (FD_ISSET(fd, &rfds)) {
            events |= G_IO_IN | G_IO_HUP | G_IO_ERR;
        }
        if (FD_ISSET(fd, &wfds)) {
            events |= G_IO_OUT | G_IO_ERR;
        }
        if (FD_ISSET(fd, &xfds)) {
            events |= G_IO_PRI;
        }
        if (events) {
            GPollFD pfd = {
                .fd = fd,
                .events = events,
            };
            g_array_append_val(gpollfds, pfd);
        }
    }
}
-
-/* Store gpollfds revents into rfds/wfds/xfds. Will be removed a few commits
- * later.
- */
-static void gpollfds_to_select(int ret)
-{
- int i;
-
- FD_ZERO(&rfds);
- FD_ZERO(&wfds);
- FD_ZERO(&xfds);
-
- if (ret <= 0) {
- return;
- }
-
- for (i = 0; i < gpollfds->len; i++) {
- int fd = g_array_index(gpollfds, GPollFD, i).fd;
- int revents = g_array_index(gpollfds, GPollFD, i).revents;
-
- if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) {
- FD_SET(fd, &rfds);
- }
- if (revents & (G_IO_OUT | G_IO_ERR)) {
- FD_SET(fd, &wfds);
- }
- if (revents & G_IO_PRI) {
- FD_SET(fd, &xfds);
- }
- }
-}
-
#ifndef _WIN32
static int glib_pollfds_idx;
static int glib_n_poll_fds;
}
}
+#define MAX_MAIN_LOOP_SPIN (1000)
+
static int os_host_main_loop_wait(uint32_t timeout)
{
int ret;
+ static int spin_counter;
glib_pollfds_fill(&timeout);
+ /* If the I/O thread is very busy or we are incorrectly busy waiting in
+ * the I/O thread, this can lead to starvation of the BQL such that the
+ * VCPU threads never run. To make sure we can detect the later case,
+ * print a message to the screen. If we run into this condition, create
+ * a fake timeout in order to give the VCPU threads a chance to run.
+ */
+ if (spin_counter > MAX_MAIN_LOOP_SPIN) {
+ static bool notified;
+
+ if (!notified) {
+ fprintf(stderr,
+ "main-loop: WARNING: I/O thread spun for %d iterations\n",
+ MAX_MAIN_LOOP_SPIN);
+ notified = true;
+ }
+
+ timeout = 1;
+ }
+
if (timeout > 0) {
+ spin_counter = 0;
qemu_mutex_unlock_iothread();
+ } else {
+ spin_counter++;
}
- /* We'll eventually drop fd_set completely. But for now we still have
- * *_fill() and *_poll() functions that use rfds/wfds/xfds.
- */
- gpollfds_from_select();
-
ret = g_poll((GPollFD *)gpollfds->data, gpollfds->len, timeout);
- gpollfds_to_select(ret);
-
if (timeout > 0) {
qemu_mutex_lock_iothread();
}
GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
int fd = pfd->fd;
int events = pfd->events;
- if (events & (G_IO_IN | G_IO_HUP | G_IO_ERR)) {
+ if (events & G_IO_IN) {
FD_SET(fd, rfds);
nfds = MAX(nfds, fd);
}
- if (events & (G_IO_OUT | G_IO_ERR)) {
+ if (events & G_IO_OUT) {
FD_SET(fd, wfds);
nfds = MAX(nfds, fd);
}
int revents = 0;
if (FD_ISSET(fd, rfds)) {
- revents |= G_IO_IN | G_IO_HUP | G_IO_ERR;
+ revents |= G_IO_IN;
}
if (FD_ISSET(fd, wfds)) {
- revents |= G_IO_OUT | G_IO_ERR;
+ revents |= G_IO_OUT;
}
if (FD_ISSET(fd, xfds)) {
revents |= G_IO_PRI;
WaitObjects *w = &wait_objects;
gint poll_timeout;
static struct timeval tv0;
+ fd_set rfds, wfds, xfds;
+ int nfds;
/* XXX: need to suppress polling by better using win32 events */
ret = 0;
return ret;
}
+ FD_ZERO(&rfds);
+ FD_ZERO(&wfds);
+ FD_ZERO(&xfds);
+ nfds = pollfds_fill(gpollfds, &rfds, &wfds, &xfds);
+ if (nfds >= 0) {
+ select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
+ if (select_ret != 0) {
+ timeout = 0;
+ }
+ if (select_ret > 0) {
+ pollfds_poll(gpollfds, nfds, &rfds, &wfds, &xfds);
+ }
+ }
+
g_main_context_prepare(context, &max_priority);
n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
poll_fds, ARRAY_SIZE(poll_fds));
g_main_context_dispatch(context);
}
- /* Call select after g_poll to avoid a useless iteration and therefore
- * improve socket latency.
- */
-
- /* This back-and-forth between GPollFDs and select(2) is temporary. We'll
- * drop it in a couple of patches, I promise :).
- */
- gpollfds_from_select();
- FD_ZERO(&rfds);
- FD_ZERO(&wfds);
- FD_ZERO(&xfds);
- nfds = pollfds_fill(gpollfds, &rfds, &wfds, &xfds);
- if (nfds >= 0) {
- select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
- if (select_ret != 0) {
- timeout = 0;
- }
- if (select_ret > 0) {
- pollfds_poll(gpollfds, nfds, &rfds, &wfds, &xfds);
- }
- }
- gpollfds_to_select(select_ret);
-
return select_ret || g_poll_ret;
}
#endif
/* poll any events */
g_array_set_size(gpollfds, 0); /* reset for new iteration */
/* XXX: separate device handlers from system ones */
- nfds = -1;
- FD_ZERO(&rfds);
- FD_ZERO(&wfds);
- FD_ZERO(&xfds);
-
#ifdef CONFIG_SLIRP
slirp_update_timeout(&timeout);
slirp_pollfds_fill(gpollfds);