X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/f708e736d0dafc05f8b7e9e73d6440c930b94686..396374caeadf044bad0aae9447eeeb109ea8651c:/async.c

diff --git a/async.c b/async.c
index f2d47ba96d..d4dd2cc799 100644
--- a/async.c
+++ b/async.c
@@ -22,9 +22,12 @@
  * THE SOFTWARE.
  */
 
+#include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "block/aio.h"
+#include "block/thread-pool.h"
 #include "qemu/main-loop.h"
+#include "qemu/atomic.h"
 
 /***********************************************************/
 /* bottom halves (can be seen as timers which expire ASAP) */
@@ -42,15 +45,27 @@ struct QEMUBH {
 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
-    bh = g_malloc0(sizeof(QEMUBH));
-    bh->ctx = ctx;
-    bh->cb = cb;
-    bh->opaque = opaque;
+    bh = g_new(QEMUBH, 1);
+    *bh = (QEMUBH){
+        .ctx = ctx,
+        .cb = cb,
+        .opaque = opaque,
+    };
+    qemu_mutex_lock(&ctx->bh_lock);
     bh->next = ctx->first_bh;
+    /* Make sure that the members are ready before putting bh into the list */
+    smp_wmb();
     ctx->first_bh = bh;
+    qemu_mutex_unlock(&ctx->bh_lock);
     return bh;
 }
 
+void aio_bh_call(QEMUBH *bh)
+{
+    bh->cb(bh->opaque);
+}
+
+/* aio_bh_poll must not be called concurrently with itself */
 int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
@@ -60,13 +75,22 @@ int aio_bh_poll(AioContext *ctx)
 
     ret = 0;
     for (bh = ctx->first_bh; bh; bh = next) {
+        /* Make sure that fetching bh happens before accessing its members */
+        smp_read_barrier_depends();
         next = bh->next;
-        if (!bh->deleted && bh->scheduled) {
-            bh->scheduled = 0;
-            if (!bh->idle)
+        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
+         * implicit memory barrier ensures that the callback sees all writes
+         * done by the scheduling thread.  It also ensures that the scheduling
+         * thread sees the zero before bh->cb has run, and thus will call
+         * aio_notify again if necessary.
+         */
+        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
+            /* Idle BHs and the notify BH don't count as progress */
+            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                 ret = 1;
+            }
             bh->idle = 0;
-            bh->cb(bh->opaque);
+            aio_bh_call(bh);
         }
     }
 
@@ -74,6 +98,7 @@ int aio_bh_poll(AioContext *ctx)
 
     /* remove deleted bhs */
     if (!ctx->walking_bh) {
+        qemu_mutex_lock(&ctx->bh_lock);
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
@@ -84,6 +109,7 @@ int aio_bh_poll(AioContext *ctx)
                 bhp = &bh->next;
             }
         }
+        qemu_mutex_unlock(&ctx->bh_lock);
     }
 
     return ret;
@@ -91,36 +117,52 @@ int aio_bh_poll(AioContext *ctx)
 
 void qemu_bh_schedule_idle(QEMUBH *bh)
 {
-    if (bh->scheduled)
-        return;
-    bh->scheduled = 1;
     bh->idle = 1;
+    /* Make sure that idle & any writes needed by the callback are done
+     * before the locations are read in aio_bh_poll.
+     */
+    atomic_mb_set(&bh->scheduled, 1);
 }
 
 void qemu_bh_schedule(QEMUBH *bh)
 {
-    if (bh->scheduled)
-        return;
-    bh->scheduled = 1;
+    AioContext *ctx;
+
+    ctx = bh->ctx;
     bh->idle = 0;
-    aio_notify(bh->ctx);
+    /* The memory barrier implicit in atomic_xchg makes sure that:
+     * 1. idle & any writes needed by the callback are done before the
+     *    locations are read in aio_bh_poll.
+     * 2. ctx is loaded before scheduled is set and the callback has a chance
+     *    to execute.
+     */
+    if (atomic_xchg(&bh->scheduled, 1) == 0) {
+        aio_notify(ctx);
+    }
 }
 
 
+
+/* This function is async: it simply marks the BH as not scheduled.
+ */
 void qemu_bh_cancel(QEMUBH *bh)
 {
     bh->scheduled = 0;
 }
 
+/* This function is async: the BH is only marked deleted here; the actual
+ * unlink and free happen at the end of a later aio_bh_poll().
+ */
 void qemu_bh_delete(QEMUBH *bh)
 {
     bh->scheduled = 0;
     bh->deleted = 1;
 }
 
-static gboolean
-aio_ctx_prepare(GSource *source, gint *timeout)
+int64_t
+aio_compute_timeout(AioContext *ctx)
 {
-    AioContext *ctx = (AioContext *) source;
+    int64_t deadline;
+    int timeout = -1;
     QEMUBH *bh;
 
     for (bh = ctx->first_bh; bh; bh = bh->next) {
@@ -128,17 +170,38 @@ aio_ctx_prepare(GSource *source, gint *timeout)
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
                  * every 10ms */
-                *timeout = 10;
+                timeout = 10000000;
             } else {
                 /* non-idle bottom halves will be executed
                  * immediately */
-                *timeout = 0;
-                return true;
+                return 0;
             }
         }
     }
 
-    return false;
+    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
+    if (deadline == 0) {
+        return 0;
+    } else {
+        return qemu_soonest_timeout(timeout, deadline);
+    }
+}
+
+static gboolean
+aio_ctx_prepare(GSource *source, gint *timeout)
+{
+    AioContext *ctx = (AioContext *) source;
+
+    atomic_or(&ctx->notify_me, 1);
+
+    /* We assume there is no timeout already supplied */
+    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
+
+    if (aio_prepare(ctx)) {
+        *timeout = 0;
+    }
+
+    return *timeout == 0;
 }
 
 static gboolean
@@ -147,12 +210,15 @@ aio_ctx_check(GSource *source)
 {
     AioContext *ctx = (AioContext *) source;
     QEMUBH *bh;
 
+    atomic_and(&ctx->notify_me, ~1);
+    aio_notify_accept(ctx);
+
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             return true;
         }
     }
-    return aio_pending(ctx);
+    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
 }
 
 static gboolean
@@ -163,7 +229,7 @@ aio_ctx_dispatch(GSource *source,
     AioContext *ctx = (AioContext *) source;
 
     assert(callback == NULL);
-    aio_poll(ctx, false);
+    aio_dispatch(ctx);
     return true;
 }
 
 static gboolean
@@ -172,9 +238,26 @@ aio_ctx_finalize(GSource *source)
 {
     AioContext *ctx = (AioContext *) source;
 
-    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
+    qemu_bh_delete(ctx->notify_dummy_bh);
+    thread_pool_free(ctx->thread_pool);
+
+    qemu_mutex_lock(&ctx->bh_lock);
+    while (ctx->first_bh) {
+        QEMUBH *next = ctx->first_bh->next;
+
+        /* qemu_bh_delete() must have been called on BHs in this AioContext */
+        assert(ctx->first_bh->deleted);
+
+        g_free(ctx->first_bh);
+        ctx->first_bh = next;
+    }
+    qemu_mutex_unlock(&ctx->bh_lock);
+
+    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
     event_notifier_cleanup(&ctx->notifier);
-    g_array_free(ctx->pollfds, TRUE);
+    rfifolock_destroy(&ctx->lock);
+    qemu_mutex_destroy(&ctx->bh_lock);
+    timerlistgroup_deinit(&ctx->tlg);
 }
 
 static GSourceFuncs aio_source_funcs = {
@@ -190,22 +273,88 @@ GSource *aio_get_g_source(AioContext *ctx)
     return &ctx->source;
 }
 
+ThreadPool *aio_get_thread_pool(AioContext *ctx)
+{
+    if (!ctx->thread_pool) {
+        ctx->thread_pool = thread_pool_new(ctx);
+    }
+    return ctx->thread_pool;
+}
+
 void aio_notify(AioContext *ctx)
 {
-    event_notifier_set(&ctx->notifier);
+    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
+     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+     */
+    smp_mb();
+    if (ctx->notify_me) {
+        event_notifier_set(&ctx->notifier);
+        atomic_mb_set(&ctx->notified, true);
+    }
+}
+
+void aio_notify_accept(AioContext *ctx)
+{
+    if (atomic_xchg(&ctx->notified, false)) {
+        event_notifier_test_and_clear(&ctx->notifier);
+    }
+}
+
+static void aio_timerlist_notify(void *opaque)
+{
+    aio_notify(opaque);
+}
+
+static void aio_rfifolock_cb(void *opaque)
+{
+    AioContext *ctx = opaque;
+
+    /* Kick the owner thread in case it is blocked in aio_poll() */
+    qemu_bh_schedule(ctx->notify_dummy_bh);
+}
+
+static void notify_dummy_bh(void *opaque)
+{
+    /* Do nothing, we were invoked just to force the event loop to iterate */
+}
+
+static void event_notifier_dummy_cb(EventNotifier *e)
+{
 }
 
-AioContext *aio_context_new(void)
+AioContext *aio_context_new(Error **errp)
 {
+    int ret;
     AioContext *ctx;
+    Error *local_err = NULL;
+
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
-    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
-    event_notifier_init(&ctx->notifier, false);
-    aio_set_event_notifier(ctx, &ctx->notifier,
+    aio_context_setup(ctx, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        goto fail;
+    }
+    ret = event_notifier_init(&ctx->notifier, false);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
+        goto fail;
+    }
+    g_source_set_can_recurse(&ctx->source, true);
+    aio_set_event_notifier(ctx, &ctx->notifier,
+                           false,
                            (EventNotifierHandler *)
-                           event_notifier_test_and_clear, NULL);
+                           event_notifier_dummy_cb);
+    ctx->thread_pool = NULL;
+    qemu_mutex_init(&ctx->bh_lock);
+    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
+    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
+
+    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);
 
     return ctx;
+fail:
+    g_source_destroy(&ctx->source);
+    return NULL;
 }
 
 void aio_context_ref(AioContext *ctx)
@@ -217,3 +366,13 @@ void aio_context_unref(AioContext *ctx)
 {
     g_source_unref(&ctx->source);
 }
+
+void aio_context_acquire(AioContext *ctx)
+{
+    rfifolock_lock(&ctx->lock);
+}
+
+void aio_context_release(AioContext *ctx)
+{
+    rfifolock_unlock(&ctx->lock);
+}
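
Taken together, the bottom-half changes above give the following lifecycle: aio_bh_new() publishes the BH under bh_lock plus smp_wmb(), qemu_bh_schedule() uses atomic_xchg() so that only the 0->1 transition pays for an aio_notify(), and qemu_bh_delete() merely marks the BH, leaving the unlink and g_free() to a later aio_bh_poll() or to aio_ctx_finalize(). A minimal caller-side sketch of that flow follows; it assumes it is built inside a QEMU tree of this vintage, and demo_cb(), counter and main() are invented for illustration, not part of the patch:

#include "qemu/osdep.h"
#include "block/aio.h"
#include "qapi/error.h"

static void demo_cb(void *opaque)
{
    int *counter = opaque;   /* runs in the thread that polls ctx */
    (*counter)++;
}

int main(void)
{
    Error *err = NULL;
    AioContext *ctx = aio_context_new(&err);
    int counter = 0;
    QEMUBH *bh;

    if (!ctx) {
        error_report_err(err);
        return 1;
    }

    bh = aio_bh_new(ctx, demo_cb, &counter);

    /* Scheduling twice runs the callback once: only the first
     * atomic_xchg(&bh->scheduled, 1) sees 0 and calls aio_notify(). */
    qemu_bh_schedule(bh);
    qemu_bh_schedule(bh);

    aio_poll(ctx, true);     /* dispatches the BH; counter is now 1 */

    /* Asynchronous delete: this only sets bh->deleted; the BH is
     * unlinked and freed by a later aio_bh_poll() or by finalize. */
    qemu_bh_delete(bh);
    aio_context_unref(ctx);
    return 0;
}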
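
The smp_mb()/notify_me pairing in aio_notify() and aio_ctx_prepare() is the heart of the patch and is easiest to see in isolation. Below is a standalone model of the handshake, a sketch using C11 atomics in place of QEMU's atomic_or/atomic_and/atomic_xchg and smp_mb(); the names poller_prepare(), poller_check() and scheduler_side() are invented here and do not exist in QEMU:

/* cc -std=c11 notify_model.c */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int  notify_me;   /* models ctx->notify_me */
static atomic_bool notified;    /* models ctx->notified  */
static atomic_bool scheduled;   /* models bh->scheduled  */

/* aio_ctx_prepare() in miniature: advertise that we may sleep, then
 * re-check for work.  The seq_cst RMW doubles as a full barrier. */
static bool poller_prepare(void)
{
    atomic_fetch_or(&notify_me, 1);
    return atomic_load(&scheduled);    /* true: poll with timeout 0 */
}

/* aio_ctx_check() in miniature: stop advertising, then consume the
 * notification the way aio_notify_accept() clears the event notifier. */
static void poller_check(void)
{
    atomic_fetch_and(&notify_me, ~1);
    if (atomic_exchange(&notified, false)) {
        /* event_notifier_test_and_clear(&ctx->notifier) goes here */
    }
}

/* qemu_bh_schedule() + aio_notify() in miniature.  The exchange makes
 * scheduled globally visible before notify_me is read (the smp_mb() in
 * the patch), so either the poller sees scheduled, or the scheduler
 * sees notify_me != 0 and posts the wakeup; a notification is never lost. */
static void scheduler_side(void)
{
    if (!atomic_exchange(&scheduled, true)) {
        if (atomic_load(&notify_me)) {
            atomic_store(&notified, true);   /* plus event_notifier_set() */
        }
    }
}

int main(void)
{
    scheduler_side();                  /* BH scheduled before the poll */
    bool busy = poller_prepare();      /* sees scheduled == true */
    poller_check();
    return busy ? 0 : 1;
}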