diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index 737bffa984a71bd86e37bd51feec87ef5d42a982..38fb6d3084dad2dd2145134d9e5da8f03d7169fe 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
 
 #include "qemu/osdep.h"
 #include "trace.h"
-#include "qemu-common.h"
 #include "qemu/thread.h"
 #include "qemu/atomic.h"
 #include "qemu/coroutine.h"
 #include "qemu/coroutine_int.h"
+#include "block/aio.h"
 
 enum {
     POOL_BATCH_SIZE = 64,
@@ -60,7 +60,7 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque)
                  * release_pool_size and the actual size of release_pool.  But
                  * it is just a heuristic, it does not need to be perfect.
                  */
-                alloc_pool_size = atomic_xchg(&release_pool_size, 0);
+                alloc_pool_size = qatomic_xchg(&release_pool_size, 0);
                 QSLIST_MOVE_ATOMIC(&alloc_pool, &release_pool);
                 co = QSLIST_FIRST(&alloc_pool);
             }
@@ -88,7 +88,7 @@ static void coroutine_delete(Coroutine *co)
     if (CONFIG_COROUTINE_POOL) {
         if (release_pool_size < POOL_BATCH_SIZE * 2) {
             QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next);
-            atomic_inc(&release_pool_size);
+            qatomic_inc(&release_pool_size);
             return;
         }
         if (alloc_pool_size < POOL_BATCH_SIZE) {
@@ -101,33 +101,79 @@ static void coroutine_delete(Coroutine *co)
     qemu_coroutine_delete(co);
 }
 
-void qemu_coroutine_enter(Coroutine *co)
+void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co)
 {
-    Coroutine *self = qemu_coroutine_self();
-    CoroutineAction ret;
+    QSIMPLEQ_HEAD(, Coroutine) pending = QSIMPLEQ_HEAD_INITIALIZER(pending);
+    Coroutine *from = qemu_coroutine_self();
 
-    trace_qemu_coroutine_enter(self, co, co->entry_arg);
+    QSIMPLEQ_INSERT_TAIL(&pending, co, co_queue_next);
 
-    if (co->caller) {
-        fprintf(stderr, "Co-routine re-entered recursively\n");
-        abort();
-    }
+    /* Run co and any queued coroutines */
+    while (!QSIMPLEQ_EMPTY(&pending)) {
+        Coroutine *to = QSIMPLEQ_FIRST(&pending);
+        CoroutineAction ret;
 
-    co->caller = self;
-    ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER);
+        /* Cannot rely on the read barrier for to in aio_co_wake(), as there are
+         * callers outside of aio_co_wake() */
+        const char *scheduled = qatomic_mb_read(&to->scheduled);
 
-    qemu_co_queue_run_restart(co);
+        QSIMPLEQ_REMOVE_HEAD(&pending, co_queue_next);
 
-    switch (ret) {
-    case COROUTINE_YIELD:
-        return;
-    case COROUTINE_TERMINATE:
-        assert(!co->locks_held);
-        trace_qemu_coroutine_terminate(co);
-        coroutine_delete(co);
-        return;
-    default:
-        abort();
+        trace_qemu_aio_coroutine_enter(ctx, from, to, to->entry_arg);
+
+        /* if the Coroutine has already been scheduled, entering it again will
+         * cause us to enter it twice, potentially even after the coroutine has
+         * been deleted */
+        if (scheduled) {
+            fprintf(stderr,
+                    "%s: Co-routine was already scheduled in '%s'\n",
+                    __func__, scheduled);
+            abort();
+        }
+
+        if (to->caller) {
+            fprintf(stderr, "Co-routine re-entered recursively\n");
+            abort();
+        }
+
+        to->caller = from;
+        to->ctx = ctx;
+
+        /* Store to->ctx before anything that stores to.  Matches
+         * barrier in aio_co_wake and qemu_co_mutex_wake.
+         */
+        smp_wmb();
+
+        ret = qemu_coroutine_switch(from, to, COROUTINE_ENTER);
+
+        /* Queued coroutines are run depth-first; previously pending coroutines
+         * run after those queued more recently.
+         */
+        QSIMPLEQ_PREPEND(&pending, &to->co_queue_wakeup);
+
+        switch (ret) {
+        case COROUTINE_YIELD:
+            break;
+        case COROUTINE_TERMINATE:
+            assert(!to->locks_held);
+            trace_qemu_coroutine_terminate(to);
+            coroutine_delete(to);
+            break;
+        default:
+            abort();
+        }
+    }
+}
+
+void qemu_coroutine_enter(Coroutine *co)
+{
+    qemu_aio_coroutine_enter(qemu_get_current_aio_context(), co);
+}
+
+void qemu_coroutine_enter_if_inactive(Coroutine *co)
+{
+    if (!qemu_coroutine_entered(co)) {
+        qemu_coroutine_enter(co);
     }
 }
 
@@ -151,3 +197,8 @@ bool qemu_coroutine_entered(Coroutine *co)
 {
     return co->caller;
 }
+
+AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co)
+{
+    return co->ctx;
+}
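
Below is a minimal, hypothetical usage sketch (not part of the diff above) showing how the entry points introduced here fit together. The entry function example_co_fn and the caller example() are illustrative names only, not QEMU code; the QEMU APIs used (qemu_coroutine_create, qemu_coroutine_yield, qemu_get_current_aio_context, qemu_aio_coroutine_enter, qemu_coroutine_enter_if_inactive) are the ones touched or relied on by this diff.

    /* Hypothetical example: run a coroutine in the current thread's
     * AioContext and resume it only when it is not already active.
     */
    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "block/aio.h"

    static void coroutine_fn example_co_fn(void *opaque)
    {
        /* The first enter runs up to this yield... */
        qemu_coroutine_yield();
        /* ...and the second enter resumes here until the coroutine
         * terminates and is returned to the pool (or freed).
         */
    }

    static void example(void)
    {
        Coroutine *co = qemu_coroutine_create(example_co_fn, NULL);

        /* Equivalent to qemu_coroutine_enter(co), which now simply
         * forwards the current thread's AioContext.
         */
        qemu_aio_coroutine_enter(qemu_get_current_aio_context(), co);

        /* Safe even if the coroutine were still entered:
         * qemu_coroutine_entered() checks co->caller, and the enter is
         * skipped in that case, avoiding the recursive re-entry abort.
         */
        qemu_coroutine_enter_if_inactive(co);
    }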