* THE SOFTWARE.
*/
+#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
+#include "block/raw-aio.h"
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */
bool deleted;
};
+void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
+{
+ QEMUBH *bh;
+ bh = g_new(QEMUBH, 1);
+ *bh = (QEMUBH){
+ .ctx = ctx,
+ .cb = cb,
+ .opaque = opaque,
+ };
+ qemu_lockcnt_lock(&ctx->list_lock);
+ bh->next = ctx->first_bh;
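+    /* The BH starts out both scheduled and deleted: aio_bh_poll() will run
+     * the callback exactly once and then free the BH during its deleted-BH
+     * sweep.
+     */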
+ bh->scheduled = 1;
+ bh->deleted = 1;
+ /* Make sure that the members are ready before putting bh into list */
+ smp_wmb();
+ ctx->first_bh = bh;
+ qemu_lockcnt_unlock(&ctx->list_lock);
+ aio_notify(ctx);
+}
+
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
QEMUBH *bh;
- bh = g_new0(QEMUBH, 1);
- bh->ctx = ctx;
- bh->cb = cb;
- bh->opaque = opaque;
- qemu_mutex_lock(&ctx->bh_lock);
+ bh = g_new(QEMUBH, 1);
+ *bh = (QEMUBH){
+ .ctx = ctx,
+ .cb = cb,
+ .opaque = opaque,
+ };
+ qemu_lockcnt_lock(&ctx->list_lock);
bh->next = ctx->first_bh;
/* Make sure that the members are ready before putting bh into list */
smp_wmb();
ctx->first_bh = bh;
- qemu_mutex_unlock(&ctx->bh_lock);
+ qemu_lockcnt_unlock(&ctx->list_lock);
return bh;
}
+void aio_bh_call(QEMUBH *bh)
+{
+ bh->cb(bh->opaque);
+}
+
/* Multiple invocations of aio_bh_poll cannot run concurrently */
int aio_bh_poll(AioContext *ctx)
{
QEMUBH *bh, **bhp, *next;
int ret;
+ bool deleted = false;
- ctx->walking_bh++;
+ qemu_lockcnt_inc(&ctx->list_lock);
ret = 0;
- for (bh = ctx->first_bh; bh; bh = next) {
- /* Make sure that fetching bh happens before accessing its members */
- smp_read_barrier_depends();
- next = bh->next;
- if (!bh->deleted && bh->scheduled) {
- bh->scheduled = 0;
- /* Paired with write barrier in bh schedule to ensure reading for
- * idle & callbacks coming after bh's scheduling.
- */
- smp_rmb();
- if (!bh->idle)
+ for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
+ next = atomic_rcu_read(&bh->next);
+ /* The atomic_xchg is paired with the one in qemu_bh_schedule. The
+ * implicit memory barrier ensures that the callback sees all writes
+ * done by the scheduling thread. It also ensures that the scheduling
+ * thread sees the zero before bh->cb has run, and thus will call
+ * aio_notify again if necessary.
+ */
+ if (atomic_xchg(&bh->scheduled, 0)) {
+ /* Idle BHs don't count as progress */
+ if (!bh->idle) {
ret = 1;
+ }
bh->idle = 0;
- bh->cb(bh->opaque);
+ aio_bh_call(bh);
+ }
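+        /* Remember that the list needs a deleted-BH sweep once the walk
+         * is done.
+         */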
+ if (bh->deleted) {
+ deleted = true;
}
}
- ctx->walking_bh--;
-
/* remove deleted bhs */
- if (!ctx->walking_bh) {
- qemu_mutex_lock(&ctx->bh_lock);
+ if (!deleted) {
+ qemu_lockcnt_dec(&ctx->list_lock);
+ return ret;
+ }
+
+ if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
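+        /* qemu_lockcnt_dec_and_lock() takes the lock only when the count
+         * drops to zero, i.e. no other thread is still walking the list,
+         * so deleted BHs can be unlinked and freed safely.
+         */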
bhp = &ctx->first_bh;
while (*bhp) {
bh = *bhp;
- if (bh->deleted) {
+ if (bh->deleted && !bh->scheduled) {
*bhp = bh->next;
g_free(bh);
} else {
bhp = &bh->next;
}
}
- qemu_mutex_unlock(&ctx->bh_lock);
+ qemu_lockcnt_unlock(&ctx->list_lock);
}
-
return ret;
}
void qemu_bh_schedule_idle(QEMUBH *bh)
{
- if (bh->scheduled)
- return;
bh->idle = 1;
/* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in aio_bh_poll.
*/
- smp_wmb();
- bh->scheduled = 1;
+ atomic_mb_set(&bh->scheduled, 1);
}
void qemu_bh_schedule(QEMUBH *bh)
{
AioContext *ctx;
- if (bh->scheduled)
- return;
ctx = bh->ctx;
bh->idle = 0;
- /* Make sure that:
+ /* The memory barrier implicit in atomic_xchg makes sure that:
* 1. idle & any writes needed by the callback are done before the
* locations are read in the aio_bh_poll.
* 2. ctx is loaded before scheduled is set and the callback has a chance
* to execute.
*/
- smp_mb();
- bh->scheduled = 1;
- aio_notify(ctx);
+ if (atomic_xchg(&bh->scheduled, 1) == 0) {
+ aio_notify(ctx);
+ }
}
int timeout = -1;
QEMUBH *bh;
- for (bh = ctx->first_bh; bh; bh = bh->next) {
- if (!bh->deleted && bh->scheduled) {
+ for (bh = atomic_rcu_read(&ctx->first_bh); bh;
+ bh = atomic_rcu_read(&bh->next)) {
+ if (bh->scheduled) {
if (bh->idle) {
/* idle bottom halves will be polled at least
* every 10ms */
{
AioContext *ctx = (AioContext *) source;
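+    /* Tell aio_notify() that it must kick the event notifier: we may be
+     * about to block in the glib poll.
+     */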
+ atomic_or(&ctx->notify_me, 1);
+
/* We assume there is no timeout already supplied */
*timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
AioContext *ctx = (AioContext *) source;
QEMUBH *bh;
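+    /* The poll is over: clear our notify_me bit and consume any pending
+     * notification before looking for ready BHs.
+     */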
+ atomic_and(&ctx->notify_me, ~1);
+ aio_notify_accept(ctx);
+
for (bh = ctx->first_bh; bh; bh = bh->next) {
- if (!bh->deleted && bh->scheduled) {
+ if (bh->scheduled) {
return true;
- }
+ }
}
return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}
AioContext *ctx = (AioContext *) source;
assert(callback == NULL);
- aio_dispatch(ctx);
+ aio_dispatch(ctx, true);
return true;
}
AioContext *ctx = (AioContext *) source;
thread_pool_free(ctx->thread_pool);
- aio_set_event_notifier(ctx, &ctx->notifier, NULL);
+
+#ifdef CONFIG_LINUX_AIO
+ if (ctx->linux_aio) {
+ laio_detach_aio_context(ctx->linux_aio, ctx);
+ laio_cleanup(ctx->linux_aio);
+ ctx->linux_aio = NULL;
+ }
+#endif
+
+ qemu_lockcnt_lock(&ctx->list_lock);
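+    /* Nobody may still be walking the BH list while the context is
+     * finalized.
+     */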
+ assert(!qemu_lockcnt_count(&ctx->list_lock));
+ while (ctx->first_bh) {
+ QEMUBH *next = ctx->first_bh->next;
+
+ /* qemu_bh_delete() must have been called on BHs in this AioContext */
+ assert(ctx->first_bh->deleted);
+
+ g_free(ctx->first_bh);
+ ctx->first_bh = next;
+ }
+ qemu_lockcnt_unlock(&ctx->list_lock);
+
+ aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
event_notifier_cleanup(&ctx->notifier);
- rfifolock_destroy(&ctx->lock);
- qemu_mutex_destroy(&ctx->bh_lock);
- g_array_free(ctx->pollfds, TRUE);
+ qemu_rec_mutex_destroy(&ctx->lock);
+ qemu_lockcnt_destroy(&ctx->list_lock);
timerlistgroup_deinit(&ctx->tlg);
}
return ctx->thread_pool;
}
-void aio_set_dispatching(AioContext *ctx, bool dispatching)
+#ifdef CONFIG_LINUX_AIO
+LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
- ctx->dispatching = dispatching;
- if (!dispatching) {
- /* Write ctx->dispatching before reading e.g. bh->scheduled.
- * Optimization: this is only needed when we're entering the "unsafe"
- * phase where other threads must call event_notifier_set.
- */
- smp_mb();
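+    /* Create the Linux AIO state lazily on first use and attach it to
+     * this AioContext.
+     */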
+ if (!ctx->linux_aio) {
+ ctx->linux_aio = laio_init();
+ laio_attach_aio_context(ctx->linux_aio, ctx);
}
+ return ctx->linux_aio;
}
+#endif
void aio_notify(AioContext *ctx)
{
- /* Write e.g. bh->scheduled before reading ctx->dispatching. */
+ /* Write e.g. bh->scheduled before reading ctx->notify_me. Pairs
+ * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+ */
smp_mb();
- if (!ctx->dispatching) {
+ if (ctx->notify_me) {
event_notifier_set(&ctx->notifier);
+ atomic_mb_set(&ctx->notified, true);
+ }
+}
+
+void aio_notify_accept(AioContext *ctx)
+{
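+    /* Pairs with the atomic_mb_set() in aio_notify(): if a notification is
+     * pending, consume it and clear the event notifier.
+     */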
+ if (atomic_xchg(&ctx->notified, false)) {
+ event_notifier_test_and_clear(&ctx->notifier);
}
}
aio_notify(opaque);
}
-static void aio_rfifolock_cb(void *opaque)
+static void event_notifier_dummy_cb(EventNotifier *e)
{
- /* Kick owner thread in case they are blocked in aio_poll() */
- aio_notify(opaque);
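+    /* Nothing to do: the notifier is cleared by aio_notify_accept() instead */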
+}
+
+/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
+static bool event_notifier_poll(void *opaque)
+{
+ EventNotifier *e = opaque;
+ AioContext *ctx = container_of(e, AioContext, notifier);
+
+ return atomic_read(&ctx->notified);
}
AioContext *aio_context_new(Error **errp)
{
int ret;
AioContext *ctx;
+
ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
+ aio_context_setup(ctx);
+
ret = event_notifier_init(&ctx->notifier, false);
if (ret < 0) {
- g_source_destroy(&ctx->source);
error_setg_errno(errp, -ret, "Failed to initialize event notifier");
- return NULL;
+ goto fail;
}
+ g_source_set_can_recurse(&ctx->source, true);
+ qemu_lockcnt_init(&ctx->list_lock);
aio_set_event_notifier(ctx, &ctx->notifier,
+ false,
(EventNotifierHandler *)
- event_notifier_test_and_clear);
- ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
+ event_notifier_dummy_cb,
+ event_notifier_poll);
+#ifdef CONFIG_LINUX_AIO
+ ctx->linux_aio = NULL;
+#endif
ctx->thread_pool = NULL;
- qemu_mutex_init(&ctx->bh_lock);
- rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
+ qemu_rec_mutex_init(&ctx->lock);
timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
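+    /* Busy polling starts out disabled (poll_max_ns == 0); it can be enabled
+     * later, e.g. via aio_context_set_poll_params().
+     */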
+ ctx->poll_ns = 0;
+ ctx->poll_max_ns = 0;
+ ctx->poll_grow = 0;
+ ctx->poll_shrink = 0;
+
return ctx;
+fail:
+ g_source_destroy(&ctx->source);
+ return NULL;
}
void aio_context_ref(AioContext *ctx)
void aio_context_acquire(AioContext *ctx)
{
- rfifolock_lock(&ctx->lock);
+ qemu_rec_mutex_lock(&ctx->lock);
}
void aio_context_release(AioContext *ctx)
{
- rfifolock_unlock(&ctx->lock);
+ qemu_rec_mutex_unlock(&ctx->lock);
}