/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPool;
struct LinuxAioState;

struct AioContext {
    GSource source;

    /* Protects all fields from multi-threaded access */
    QemuRecMutex lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* Lock protecting the first_bh list against concurrent adders and
     * the deleter.
     */
    QemuMutex bh_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /* State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;
    bool epoll_enabled;
    bool epoll_available;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);
f627aab1 148
e3713e00
PB
149/**
150 * aio_context_ref:
151 * @ctx: The AioContext to operate on.
152 *
153 * Add a reference to an AioContext.
154 */
155void aio_context_ref(AioContext *ctx);
156
157/**
158 * aio_context_unref:
159 * @ctx: The AioContext to operate on.
160 *
161 * Drop a reference to an AioContext.
162 */
163void aio_context_unref(AioContext *ctx);
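
/* Example (illustrative sketch): typical creation and teardown of an
 * AioContext.  The error handling style is an assumption of this sketch.
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         error_report_err(local_err);    // report and free the error
 *         return;
 *     }
 *     ...
 *     aio_context_unref(ctx);             // drop the creating reference
 */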
164
98563fc3 165/* Take ownership of the AioContext. If the AioContext will be shared between
49110174
PB
166 * threads, and a thread does not want to be interrupted, it will have to
167 * take ownership around calls to aio_poll(). Otherwise, aio_poll()
168 * automatically takes care of calling aio_context_acquire and
169 * aio_context_release.
98563fc3 170 *
49110174
PB
171 * Access to timers and BHs from a thread that has not acquired AioContext
172 * is possible. Access to callbacks for now must be done while the AioContext
173 * is owned by the thread (FIXME).
98563fc3
SH
174 */
175void aio_context_acquire(AioContext *ctx);
176
177/* Relinquish ownership of the AioContext. */
178void aio_context_release(AioContext *ctx);
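
/* Example (illustrative sketch): a thread that shares the AioContext and
 * does not want to be interrupted takes ownership around aio_poll(), as
 * described above.  'done' is a hypothetical termination flag.
 *
 *     aio_context_acquire(ctx);
 *     while (!done) {
 *         aio_poll(ctx, true);            // blocking poll while owned
 *     }
 *     aio_context_release(ctx);
 */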

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
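
/* Example (illustrative sketch): a one-shot bottom half needs no QEMUBH
 * bookkeeping and no qemu_bh_delete; it runs once and is freed.  my_cb
 * and my_state are hypothetical names.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;            // MyState is hypothetical
 *         ...
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, my_state);
 */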

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
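
/* Example (illustrative sketch): lifecycle of a long-lived bottom half.
 * my_cb and s are hypothetical names.
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, s);   // allocate once
 *     qemu_bh_schedule(bh);                     // schedule as often as needed
 *     ...
 *     qemu_bh_delete(bh);                       // cancel and free when done
 */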

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll must not be called concurrently for the
 * same AioContext.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only marked as deleted
 * here, and is actually freed at the end of the next pass over the bottom
 * half list.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 *
 * @dispatch_fds: true to process fds, false to skip them
 *                (can be used as an optimization by callers that know there
 *                are no fds ready)
 */
bool aio_dispatch(AioContext *ctx, bool dispatch_fds);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bh callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
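
/* Example (illustrative sketch): drain all work that can currently make
 * progress without blocking, then wait for the next event.
 *
 *     while (aio_poll(ctx, false)) {
 *         // keep dispatching while progress is made
 *     }
 *     aio_poll(ctx, true);                // block until an event completes
 */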

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);
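
/* Example (illustrative sketch): registering a read handler for a file
 * descriptor and removing it again.  read_cb, poll_cb and opaque are
 * hypothetical names; passing NULL for every callback unregisters the fd.
 *
 *     aio_set_fd_handler(ctx, fd, true, read_cb, NULL, poll_cb, opaque);
 *     ...
 *     aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL, NULL);
 */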

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);
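
/* Example (illustrative sketch): hooking an EventNotifier into the
 * context.  notifier_cb is a hypothetical handler; io_poll may be NULL
 * when the handler has no cheap poll-mode check.
 *
 *     EventNotifier e;
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, false, notifier_cb, NULL);
 */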

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}
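
/* Example (illustrative sketch): arming a caller-allocated timer on this
 * context.  timer_cb and s are hypothetical names.
 *
 *     QEMUTimer ts;
 *     aio_timer_init(ctx, &ts, QEMU_CLOCK_REALTIME, SCALE_MS, timer_cb, s);
 *     timer_mod(&ts, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(&ts);                     // disarm before ts goes away
 */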

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    assert(ctx->external_disable_cnt > 0);
    atomic_dec(&ctx->external_disable_cnt);
}
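
/* Example (illustrative sketch): the disable count nests, so every
 * aio_disable_external must be paired with exactly one aio_enable_external.
 *
 *     aio_disable_external(ctx);
 *     ...                                 // external clients are ignored here
 *     aio_enable_external(ctx);
 */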

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check whether the node's is_external flag allows it to be polled by @ctx
 * at this moment.  True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * @ctx: the aio context
 *
 * Return whether we are running in the I/O thread that manages @ctx.
 */
static inline bool aio_context_in_iothread(AioContext *ctx)
{
    return ctx == qemu_get_current_aio_context();
}

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

#endif