qemu_coroutine_delete(co);
}
-void qemu_coroutine_enter(Coroutine *co)
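+/* Enter coroutine @co and run it in AioContext @ctx.  qemu_coroutine_enter()
+ * is a wrapper that passes the calling thread's AioContext; use
+ * aio_co_wake() or aio_co_schedule() to restart a coroutine from another
+ * AioContext or thread. */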
+void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co)
{
Coroutine *self = qemu_coroutine_self();
CoroutineAction ret;
- trace_qemu_coroutine_enter(self, co, co->entry_arg);
+ /* Cannot rely on the read barrier for co in aio_co_wake(), as there are
+ * callers outside of aio_co_wake() */
+ const char *scheduled = atomic_mb_read(&co->scheduled);
+
+ trace_qemu_aio_coroutine_enter(ctx, self, co, co->entry_arg);
+
+ /* if the Coroutine has already been scheduled, entering it again will
+ * cause us to enter it twice, potentially even after the coroutine has
+ * been deleted */
+ if (scheduled) {
+ fprintf(stderr,
+ "%s: Co-routine was already scheduled in '%s'\n",
+ __func__, scheduled);
+ abort();
+ }
if (co->caller) {
    fprintf(stderr, "Co-routine re-entered recursively\n");
    abort();
}
co->caller = self;
- co->ctx = qemu_get_current_aio_context();
+ co->ctx = ctx;
/* Store co->ctx before anything that stores co. Matches
 * barrier in aio_co_wake and qemu_co_mutex_wake.
 */
smp_wmb();

ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER);

qemu_co_queue_run_restart(co);
+ /* Beware, if ret == COROUTINE_YIELD and qemu_co_queue_run_restart()
+ * has started any other coroutine, "co" might have been reentered
+ * and even freed by now! So be careful and do not touch it.
+ */
+
switch (ret) {
case COROUTINE_YIELD:
    return;
case COROUTINE_TERMINATE:
    assert(!co->locks_held);
    trace_qemu_coroutine_terminate(co);
    coroutine_delete(co);
    return;
default:
    abort();
}
}
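+/* Enter @co in the AioContext of the calling thread. */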
+void qemu_coroutine_enter(Coroutine *co)
+{
+ qemu_aio_coroutine_enter(qemu_get_current_aio_context(), co);
+}
+
void qemu_coroutine_enter_if_inactive(Coroutine *co)
{
if (!qemu_coroutine_entered(co)) {