         .cb = cb,
         .opaque = opaque,
     };
-    qemu_mutex_lock(&ctx->bh_lock);
+    qemu_lockcnt_lock(&ctx->list_lock);
     bh->next = ctx->first_bh;
     bh->scheduled = 1;
     bh->deleted = 1;
     /* Make sure that the members are ready before putting bh into list */
     smp_wmb();
     ctx->first_bh = bh;
-    qemu_mutex_unlock(&ctx->bh_lock);
+    qemu_lockcnt_unlock(&ctx->list_lock);
     aio_notify(ctx);
 }
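A minimal sketch (not part of the patch) of the QemuLockCnt discipline the hunks above and below rely on: list writers still serialize against each other with qemu_lockcnt_lock()/qemu_lockcnt_unlock(), readers only bump the counter, and the code that frees deleted BHs runs only once it can observe the counter at zero (see the dec-and-lock path in aio_bh_poll below). The function names are illustrative, and the QEMUBH fields are only visible inside the file that defines them, where these hunks live.

/* Illustrative only -- not patch code. */
static void example_insert(AioContext *ctx, QEMUBH *bh)
{
    qemu_lockcnt_lock(&ctx->list_lock);      /* writers (and the free path) take the lock */
    bh->next = ctx->first_bh;
    smp_wmb();                               /* publish the fields before the link */
    ctx->first_bh = bh;
    qemu_lockcnt_unlock(&ctx->list_lock);
}

static void example_walk(AioContext *ctx)
{
    QEMUBH *bh;

    qemu_lockcnt_inc(&ctx->list_lock);       /* reader side: take a reference on the list */
    for (bh = atomic_rcu_read(&ctx->first_bh); bh;
         bh = atomic_rcu_read(&bh->next)) {
        /* safe to look at *bh: nothing is freed while the count is held */
    }
    qemu_lockcnt_dec(&ctx->list_lock);
}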
         .cb = cb,
         .opaque = opaque,
     };
-    qemu_mutex_lock(&ctx->bh_lock);
+    qemu_lockcnt_lock(&ctx->list_lock);
     bh->next = ctx->first_bh;
     /* Make sure that the members are ready before putting bh into list */
     smp_wmb();
     ctx->first_bh = bh;
-    qemu_mutex_unlock(&ctx->bh_lock);
+    qemu_lockcnt_unlock(&ctx->list_lock);
     return bh;
 }
 {
     QEMUBH *bh, **bhp, *next;
     int ret;
+    bool deleted = false;
-    ctx->walking_bh++;
+    qemu_lockcnt_inc(&ctx->list_lock);
     ret = 0;
-    for (bh = ctx->first_bh; bh; bh = next) {
-        /* Make sure that fetching bh happens before accessing its members */
-        smp_read_barrier_depends();
-        next = bh->next;
+    for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
+        next = atomic_rcu_read(&bh->next);
         /* The atomic_xchg is paired with the one in qemu_bh_schedule. The
          * implicit memory barrier ensures that the callback sees all writes
          * done by the scheduling thread. It also ensures that the scheduling
             bh->idle = 0;
             aio_bh_call(bh);
         }
+        if (bh->deleted) {
+            deleted = true;
+        }
     }
-    ctx->walking_bh--;
-
     /* remove deleted bhs */
-    if (!ctx->walking_bh) {
-        qemu_mutex_lock(&ctx->bh_lock);
+    if (!deleted) {
+        qemu_lockcnt_dec(&ctx->list_lock);
+        return ret;
+    }
+
+    if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
             if (bh->deleted && !bh->scheduled) {
                 *bhp = bh->next;
                 g_free(bh);
             } else {
                 bhp = &bh->next;
             }
         }
-        qemu_mutex_unlock(&ctx->bh_lock);
+        qemu_lockcnt_unlock(&ctx->list_lock);
     }
-
     return ret;
 }
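For reference, a hedged sketch of the reclamation idiom that aio_bh_poll follows above: every reader decrements the count on its way out, but only the caller whose qemu_lockcnt_dec_and_lock() both drops the count to zero and acquires the lock gets to unlink and free dead nodes; everyone else leaves cleanup to a later call. The Node type and function below are hypothetical, shown only to isolate the pattern; initialization of the QemuLockCnt is omitted.

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"

typedef struct Node {
    struct Node *next;
    bool dead;
} Node;

static QemuLockCnt nodes_lock;   /* assume qemu_lockcnt_init() was called */
static Node *nodes_head;

static void nodes_visit_and_reclaim(void)
{
    bool saw_dead = false;
    Node *n;

    qemu_lockcnt_inc(&nodes_lock);
    for (n = atomic_rcu_read(&nodes_head); n; n = atomic_rcu_read(&n->next)) {
        saw_dead |= n->dead;
    }

    if (!saw_dead) {
        qemu_lockcnt_dec(&nodes_lock);       /* fast path: nothing to free */
        return;
    }

    /* Succeeds only if we were the last reader *and* got the lock, so
     * nobody can still be dereferencing the nodes freed below.
     */
    if (qemu_lockcnt_dec_and_lock(&nodes_lock)) {
        Node **np = &nodes_head;
        while (*np) {
            Node *cur = *np;
            if (cur->dead) {
                *np = cur->next;
                g_free(cur);
            } else {
                np = &cur->next;
            }
        }
        qemu_lockcnt_unlock(&nodes_lock);
    }
}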
     int timeout = -1;
     QEMUBH *bh;
-    for (bh = ctx->first_bh; bh; bh = bh->next) {
+    for (bh = atomic_rcu_read(&ctx->first_bh); bh;
+         bh = atomic_rcu_read(&bh->next)) {
         if (bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
     AioContext *ctx = (AioContext *) source;
     assert(callback == NULL);
-    aio_dispatch(ctx);
+    aio_dispatch(ctx, true);
     return true;
 }
     }
 #endif
-    qemu_mutex_lock(&ctx->bh_lock);
+    qemu_lockcnt_lock(&ctx->list_lock);
+    assert(!qemu_lockcnt_count(&ctx->list_lock));
     while (ctx->first_bh) {
         QEMUBH *next = ctx->first_bh->next;
         g_free(ctx->first_bh);
         ctx->first_bh = next;
     }
-    qemu_mutex_unlock(&ctx->bh_lock);
+    qemu_lockcnt_unlock(&ctx->list_lock);
-    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
+    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
     qemu_rec_mutex_destroy(&ctx->lock);
-    qemu_mutex_destroy(&ctx->bh_lock);
+    qemu_lockcnt_destroy(&ctx->list_lock);
     timerlistgroup_deinit(&ctx->tlg);
 }
 {
 }
+/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
+static bool event_notifier_poll(void *opaque)
+{
+    EventNotifier *e = opaque;
+    AioContext *ctx = container_of(e, AioContext, notifier);
+
+    return atomic_read(&ctx->notified);
+}
+
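Some context for the new callback, hedged since the rest of the series is not quoted here: the extra argument passed to aio_set_event_notifier() below is a polling hook that must not block; in the simplest case it just reports whether its read handler has work, and the event loop spins on such hooks for a bounded time (capped by the poll_max_ns field initialized further down) before falling back to blocking in e.g. ppoll(). A hypothetical handler mirroring event_notifier_poll() might look like this; MyDevice, host_notifier and my_device_has_work() are illustrative names, not QEMU APIs.

/* Hypothetical example, modeled on event_notifier_poll() above. */
static bool my_device_poll(void *opaque)
{
    EventNotifier *e = opaque;
    MyDevice *dev = container_of(e, MyDevice, host_notifier);

    /* Must not block; just say whether the read handler has work to do. */
    return my_device_has_work(dev);
}

Such a function would be passed as the final argument to aio_set_event_notifier(), exactly as event_notifier_poll is registered below.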
 AioContext *aio_context_new(Error **errp)
 {
     int ret;
         goto fail;
     }
     g_source_set_can_recurse(&ctx->source, true);
+    qemu_lockcnt_init(&ctx->list_lock);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            false,
                            (EventNotifierHandler *)
-                           event_notifier_dummy_cb);
+                           event_notifier_dummy_cb,
+                           event_notifier_poll);
 #ifdef CONFIG_LINUX_AIO
     ctx->linux_aio = NULL;
 #endif
     ctx->thread_pool = NULL;
-    qemu_mutex_init(&ctx->bh_lock);
     qemu_rec_mutex_init(&ctx->lock);
     timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
+    ctx->poll_ns = 0;
+    ctx->poll_max_ns = 0;
+    ctx->poll_grow = 0;
+    ctx->poll_shrink = 0;
+
     return ctx;
 fail:
     g_source_destroy(&ctx->source);