4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "qemu-common.h"
26 #include "block/aio.h"
27 #include "qemu/main-loop.h"
29 /***********************************************************/
30 /* bottom halves (can be seen as timers which expire ASAP) */
/* Create a new bottom half (BH) and link it onto @ctx's BH list.
 * @cb/@opaque are stored for later invocation when the BH is scheduled.
 * NOTE(review): this view is missing lines between the visible statements
 * (declaration of bh, the field assignments for cb/opaque, the list-head
 * store and the return) — confirm against the full file. */
42 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
/* Zero-initialized allocation: all fields (deleted, scheduled, next, ...)
 * start cleared. */
45     bh = g_malloc0(sizeof(QEMUBH));
/* Push onto the singly linked list headed at ctx->first_bh. */
49     bh->next = ctx->first_bh;
/* Run all bottom halves that are currently scheduled (and not deleted) on
 * @ctx, then garbage-collect deleted BHs.
 * Returns nonzero if progress was made (presumably — the return statement is
 * not visible in this view; confirm against the full file).
 * NOTE(review): lines are missing here (walking_bh increment/decrement around
 * the loop, the callback invocation, and the unlink loop body). */
54 int aio_bh_poll(AioContext *ctx)
56     QEMUBH *bh, **bhp, *next;
/* Snapshot bh->next before invoking the callback: the callback may delete
 * or reschedule BHs, so we must not re-read bh->next afterwards.
 * (The `next = bh->next` capture itself is in a missing line — verify.) */
62     for (bh = ctx->first_bh; bh; bh = next) {
/* Only run BHs that are scheduled and not flagged for deletion. */
64         if (!bh->deleted && bh->scheduled) {
/* remove deleted bhs */
/* Only safe to unlink when no other caller is concurrently walking the
 * list (walking_bh acts as a reentrancy/recursion guard). */
76     if (!ctx->walking_bh) {
/* Schedule @bh as an "idle" bottom half: it is run opportunistically rather
 * than forcing the event loop to wake immediately (contrast with
 * qemu_bh_schedule below, and the 10 ms idle timeout in aio_ctx_prepare).
 * NOTE(review): body not visible in this view. */
92 void qemu_bh_schedule_idle(QEMUBH *bh)
/* Schedule @bh to run as soon as possible; presumably also notifies the
 * AioContext (aio_notify) so a blocked event loop wakes up — body not
 * visible in this view, confirm against the full file. */
100 void qemu_bh_schedule(QEMUBH *bh)
/* Cancel a pending schedule of @bh (clear its scheduled flag) without
 * freeing it; the BH can be rescheduled later.
 * NOTE(review): body not visible in this view. */
109 void qemu_bh_cancel(QEMUBH *bh)
/* Mark @bh for deletion. Actual unlinking and freeing is deferred to
 * aio_bh_poll (the "remove deleted bhs" pass), which is why deletion is a
 * flag rather than an immediate free — the list may be mid-walk.
 * NOTE(review): body not visible in this view. */
114 void qemu_bh_delete(QEMUBH *bh)
/* GSourceFuncs.prepare callback for the AioContext GSource.
 * Scans the BH list to decide the poll timeout: idle BHs impose a bounded
 * timeout, non-idle scheduled BHs make the source ready immediately.
 * NOTE(review): the return statements, timeout assignments, and closing
 * braces are not visible in this view — confirm exact timeout values. */
121 aio_ctx_prepare(GSource *source, gint *timeout)
/* The GSource is embedded as the first member of AioContext, so this
 * downcast is valid. */
123     AioContext *ctx = (AioContext *) source;
126     for (bh = ctx->first_bh; bh; bh = bh->next) {
127         if (!bh->deleted && bh->scheduled) {
/* idle bottom halves will be polled at least
 * (comment continues on a line missing from this view) */
/* non-idle bottom halves will be executed
 * (comment continues on a line missing from this view) */
/* GSourceFuncs.check callback: returns TRUE if the source has work ready —
 * either a scheduled (non-idle, presumably; the early-return line is missing
 * from this view) bottom half, or pending fd/aio activity per aio_pending(). */
145 aio_ctx_check(GSource *source)
147     AioContext *ctx = (AioContext *) source;
150     for (bh = ctx->first_bh; bh; bh = bh->next) {
151         if (!bh->deleted && bh->scheduled) {
/* No ready BH found in the loop: fall back to checking pending aio/fd
 * handlers. */
155     return aio_pending(ctx);
/* GSourceFuncs.dispatch callback: runs one non-blocking iteration of the
 * AioContext event loop. The GSource machinery never passes a user callback
 * here, hence the assert. */
159 aio_ctx_dispatch(GSource *source,
160                  GSourceFunc callback,
163     AioContext *ctx = (AioContext *) source;
/* AioContext sources have no per-source user callback; dispatch always goes
 * through aio_poll. */
165     assert(callback == NULL);
/* blocking=false: prepare/check already determined work is ready. */
166     aio_poll(ctx, false);
/* GSourceFuncs.finalize callback: tears down AioContext-owned resources when
 * the last reference to the GSource is dropped (see aio_context_unref). */
171 aio_ctx_finalize(GSource *source)
173     AioContext *ctx = (AioContext *) source;
/* Unregister the notifier's fd handler before destroying the notifier. */
175     aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
176     event_notifier_cleanup(&ctx->notifier);
/* TRUE: free the element data along with the GArray wrapper. */
177     g_array_free(ctx->pollfds, TRUE);
/* Vtable wiring the aio_ctx_* callbacks above into GLib's main loop;
 * initializer entries are on lines missing from this view. */
180 static GSourceFuncs aio_source_funcs = {
/* Return the AioContext's GSource for attachment to a GMainContext.
 * Takes a new reference on behalf of the caller, who must g_source_unref()
 * it when done. (The return statement is on a line missing from this view.) */
187 GSource *aio_get_g_source(AioContext *ctx)
189     g_source_ref(&ctx->source);
/* Wake up @ctx's event loop if it is blocked in polling: setting the event
 * notifier makes its fd readable, which the loop watches (registered in
 * aio_context_new below). */
193 void aio_notify(AioContext *ctx)
195     event_notifier_set(&ctx->notifier);
/* Allocate and initialize a new AioContext. The caller owns one reference;
 * release it with aio_context_unref (finalize then frees the resources set
 * up here — see aio_ctx_finalize above).
 * NOTE(review): the declaration of ctx and the return statement are on lines
 * missing from this view. */
198 AioContext *aio_context_new(void)
/* The AioContext embeds its GSource as the first member, so allocating the
 * GSource with sizeof(AioContext) allocates both in one object. */
201     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
/* Growable array of GPollFD for the poll step; freed in aio_ctx_finalize. */
202     ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
203     event_notifier_init(&ctx->notifier, false);
/* Register the notifier's fd so aio_notify() wakes the loop; the handler
 * merely clears the notifier state. */
204     aio_set_event_notifier(ctx, &ctx->notifier,
205                            (EventNotifierHandler *)
206                            event_notifier_test_and_clear, NULL);
/* Take a reference on @ctx. Lifetime is delegated entirely to the embedded
 * GSource's refcount. */
211 void aio_context_ref(AioContext *ctx)
213     g_source_ref(&ctx->source);
/* Drop a reference on @ctx; when the count reaches zero, GLib invokes
 * aio_ctx_finalize and frees the AioContext. */
216 void aio_context_unref(AioContext *ctx)
218     g_source_unref(&ctx->source);