diff --git a/async.c b/async.c
index b4bf205a0c..f30d011ebc 100644
--- a/async.c
+++ b/async.c
@@ -29,6 +29,7 @@
 #include "block/thread-pool.h"
 #include "qemu/main-loop.h"
 #include "qemu/atomic.h"
+#include "block/raw-aio.h"
 
 /***********************************************************/
 /* bottom halves (can be seen as timers which expire ASAP) */
@@ -43,6 +44,25 @@ struct QEMUBH {
     bool deleted;
 };
 
+void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
+{
+    QEMUBH *bh;
+    bh = g_new(QEMUBH, 1);
+    *bh = (QEMUBH){
+        .ctx = ctx,
+        .cb = cb,
+        .opaque = opaque,
+    };
+    qemu_mutex_lock(&ctx->bh_lock);
+    bh->next = ctx->first_bh;
+    bh->scheduled = 1;
+    bh->deleted = 1;
+    /* Make sure that the members are ready before putting bh into list */
+    smp_wmb();
+    ctx->first_bh = bh;
+    qemu_mutex_unlock(&ctx->bh_lock);
+}
+
 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
@@ -85,7 +105,7 @@ int aio_bh_poll(AioContext *ctx)
          * thread sees the zero before bh->cb has run, and thus will call
          * aio_notify again if necessary.
          */
-        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
+        if (atomic_xchg(&bh->scheduled, 0)) {
             /* Idle BHs and the notify BH don't count as progress */
             if (!bh->idle && bh != ctx->notify_dummy_bh) {
                 ret = 1;
@@ -103,7 +123,7 @@ int aio_bh_poll(AioContext *ctx)
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
-            if (bh->deleted) {
+            if (bh->deleted && !bh->scheduled) {
                 *bhp = bh->next;
                 g_free(bh);
             } else {
@@ -167,7 +187,7 @@ aio_compute_timeout(AioContext *ctx)
     QEMUBH *bh;
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
@@ -215,9 +235,9 @@ aio_ctx_check(GSource *source)
     aio_notify_accept(ctx);
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
-        if (!bh->deleted && bh->scheduled) {
+        if (bh->scheduled) {
             return true;
-	}
+        }
     }
     return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
 }
@@ -242,6 +262,14 @@ aio_ctx_finalize(GSource *source)
     qemu_bh_delete(ctx->notify_dummy_bh);
     thread_pool_free(ctx->thread_pool);
 
+#ifdef CONFIG_LINUX_AIO
+    if (ctx->linux_aio) {
+        laio_detach_aio_context(ctx->linux_aio, ctx);
+        laio_cleanup(ctx->linux_aio);
+        ctx->linux_aio = NULL;
+    }
+#endif
+
     qemu_mutex_lock(&ctx->bh_lock);
     while (ctx->first_bh) {
         QEMUBH *next = ctx->first_bh->next;
@@ -282,6 +310,17 @@ ThreadPool *aio_get_thread_pool(AioContext *ctx)
     return ctx->thread_pool;
 }
 
+#ifdef CONFIG_LINUX_AIO
+LinuxAioState *aio_get_linux_aio(AioContext *ctx)
+{
+    if (!ctx->linux_aio) {
+        ctx->linux_aio = laio_init();
+        laio_attach_aio_context(ctx->linux_aio, ctx);
+    }
+    return ctx->linux_aio;
+}
+#endif
+
 void aio_notify(AioContext *ctx)
 {
     /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
@@ -327,14 +366,10 @@ AioContext *aio_context_new(Error **errp)
 {
     int ret;
     AioContext *ctx;
-    Error *local_err = NULL;
 
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
-    aio_context_setup(ctx, &local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
-        goto fail;
-    }
+    aio_context_setup(ctx);
+
     ret = event_notifier_init(&ctx->notifier, false);
     if (ret < 0) {
         error_setg_errno(errp, -ret, "Failed to initialize event notifier");
@@ -345,6 +380,9 @@ AioContext *aio_context_new(Error **errp)
                            false,
                            (EventNotifierHandler *)
                            event_notifier_dummy_cb);
+#ifdef CONFIG_LINUX_AIO
+    ctx->linux_aio = NULL;
+#endif
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
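
A minimal caller sketch (not part of this diff) showing how the new one-shot API is intended to be used; "MyRequest", "my_complete" and "my_defer" are hypothetical names for illustration only:

/*
 * Sketch: deferring a completion routine with aio_bh_schedule_oneshot().
 * The caller never receives a QEMUBH pointer, so there is no handle to
 * leak or to qemu_bh_delete().
 */
#include "qemu/osdep.h"
#include "block/aio.h"

typedef struct MyRequest {
    bool done;
} MyRequest;

static void my_complete(void *opaque)
{
    MyRequest *req = opaque;
    req->done = true;
    /*
     * No cleanup needed here: the BH was created with scheduled = 1 and
     * deleted = 1, so aio_bh_poll() runs it exactly once (atomic_xchg
     * consumes scheduled) and then frees it in the deferred-deletion
     * pass, which now requires deleted && !scheduled.
     */
}

static void my_defer(AioContext *ctx, MyRequest *req)
{
    aio_bh_schedule_oneshot(ctx, my_complete, req);
}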
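
A hypothetical consumer sketch for the per-AioContext LinuxAioState added above: a block driver fetches the shared state instead of managing its own instance. "my_get_laio" is a made-up helper; aio_get_linux_aio() and bdrv_get_aio_context() are existing QEMU APIs.

#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/block.h"
#include "block/raw-aio.h"

#ifdef CONFIG_LINUX_AIO
static LinuxAioState *my_get_laio(BlockDriverState *bs)
{
    /*
     * The LinuxAioState is created lazily and attached to the context
     * on first use; aio_ctx_finalize() detaches and cleans it up when
     * the AioContext is destroyed.
     */
    return aio_get_linux_aio(bdrv_get_aio_context(bs));
}
#endif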