/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "block/raw-aio.h"
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */
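/*
 * Usage sketch (not part of this file; my_cb and my_opaque are hypothetical
 * names, and ctx is assumed to be a valid AioContext): a bottom half created
 * with aio_bh_new() runs its callback in ctx's event loop once
 * qemu_bh_schedule() is called, and is freed lazily by aio_bh_poll() after
 * qemu_bh_delete():
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_opaque);
 *     qemu_bh_schedule(bh);
 *     ...
 *     qemu_bh_delete(bh);
 */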
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
    bh = g_new(QEMUBH, 1);
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    qemu_mutex_unlock(&ctx->bh_lock);
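/* Allocate a new bottom half for @ctx and link it, under bh_lock, onto the
 * context's singly linked first_bh list.
 */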
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
    bh = g_new(QEMUBH, 1);
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    qemu_mutex_unlock(&ctx->bh_lock);
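/* Invoke the bottom half's callback with its opaque argument. */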
void aio_bh_call(QEMUBH *bh)
/* Multiple invocations of aio_bh_poll must not run concurrently */
int aio_bh_poll(AioContext *ctx)
    QEMUBH *bh, **bhp, *next;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs and the notify BH don't count as progress */
            if (!bh->idle && bh != ctx->notify_dummy_bh) {
    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
            if (bh->deleted && !bh->scheduled) {
        qemu_mutex_unlock(&ctx->bh_lock);
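/* Schedule @bh as an idle bottom half: idle BHs do not count as progress and
 * are only polled every 10 ms or so (see aio_compute_timeout).
 */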
void qemu_bh_schedule_idle(QEMUBH *bh)
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
void qemu_bh_schedule(QEMUBH *bh)
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
/* This function is asynchronous. */
void qemu_bh_cancel(QEMUBH *bh)
/* This function is asynchronous.  The bottom half is only marked as deleted
 * here; it is actually removed and freed later, in aio_bh_poll.
 */
void qemu_bh_delete(QEMUBH *bh)
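/* Compute how long the event loop may block: 0 if a non-idle bottom half is
 * pending, around 10 ms if only idle bottom halves are pending, otherwise
 * whatever the nearest timer deadline allows.
 */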
int64_t
aio_compute_timeout(AioContext *ctx)
    for (bh = ctx->first_bh; bh; bh = bh->next) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                /* non-idle bottom halves will be executed
                 * immediately */
    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    return qemu_soonest_timeout(timeout, deadline);
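/* GSource prepare callback: advertise notify_me, compute the poll timeout and
 * report whether the source is already ready to dispatch.
 */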
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
    AioContext *ctx = (AioContext *) source;
    atomic_or(&ctx->notify_me, 1);
    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
    if (aio_prepare(ctx)) {
    return *timeout == 0;
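/* GSource check callback: drop notify_me, consume any pending notification
 * and report whether a bottom half, file descriptor or timer is ready.
 */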
static gboolean
aio_ctx_check(GSource *source)
    AioContext *ctx = (AioContext *) source;
    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);
    for (bh = ctx->first_bh; bh; bh = bh->next) {
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
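/* GSource dispatch callback: run the context's pending work.  No GLib-level
 * callback is expected, hence the assertion below.
 */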
static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
    AioContext *ctx = (AioContext *) source;
    assert(callback == NULL);
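/* GSource finalize callback: tear down the AioContext when the last reference
 * to its GSource is dropped.
 */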
static void
aio_ctx_finalize(GSource *source)
    AioContext *ctx = (AioContext *) source;
    qemu_bh_delete(ctx->notify_dummy_bh);
    thread_pool_free(ctx->thread_pool);
#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif
    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;
        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);
        g_free(ctx->first_bh);
        ctx->first_bh = next;
    qemu_mutex_unlock(&ctx->bh_lock);
    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
static GSourceFuncs aio_source_funcs = {
GSource *aio_get_g_source(AioContext *ctx)
    g_source_ref(&ctx->source);
ThreadPool *aio_get_thread_pool(AioContext *ctx)
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    return ctx->thread_pool;
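/* Lazily initialize the Linux AIO state for this context the first time it
 * is requested, and attach it to the context's event loop.
 */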
#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_get_linux_aio(AioContext *ctx)
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init();
        laio_attach_aio_context(ctx->linux_aio, ctx);
    }
    return ctx->linux_aio;
#endif
void aio_notify(AioContext *ctx)
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
void aio_notify_accept(AioContext *ctx)
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
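/* Timer list group notify callback (registered in aio_context_new): wake up
 * the event loop when a timer deadline changes.
 */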
static void aio_timerlist_notify(void *opaque)
static void aio_rfifolock_cb(void *opaque)
    AioContext *ctx = opaque;
    /* Kick the owner thread in case it is blocked in aio_poll() */
    qemu_bh_schedule(ctx->notify_dummy_bh);
static void notify_dummy_bh(void *opaque)
    /* Do nothing, we were invoked just to force the event loop to iterate */
static void event_notifier_dummy_cb(EventNotifier *e)
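/* Create a new AioContext.  On failure, set @errp and return NULL. */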
AioContext *aio_context_new(Error **errp)
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx);
    ret = event_notifier_init(&ctx->notifier, false);
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);
    g_source_destroy(&ctx->source);
void aio_context_ref(AioContext *ctx)
    g_source_ref(&ctx->source);

void aio_context_unref(AioContext *ctx)
    g_source_unref(&ctx->source);
void aio_context_acquire(AioContext *ctx)
    rfifolock_lock(&ctx->lock);

void aio_context_release(AioContext *ctx)
    rfifolock_unlock(&ctx->lock);
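/*
 * Usage sketch (hypothetical caller, not part of this file): code running
 * outside the context's home thread wraps its accesses in the recursive FIFO
 * lock taken above:
 *
 *     aio_context_acquire(ctx);
 *     ...operate on objects tied to ctx, e.g. qemu_bh_schedule(bh)...
 *     aio_context_release(ctx);
 */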