/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockDriverAIOCB BlockDriverAIOCB;
typedef void BlockDriverCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel)(BlockDriverAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockDriverAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockDriverCompletionFunc *cb;
    void *opaque;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque);
void qemu_aio_release(void *p);

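/* A hedged usage sketch, not part of the original header: how a block
 * driver might declare its own AIOCB type and allocate it with
 * qemu_aio_get().  MyAIOCB, my_aio_cancel and my_state are hypothetical
 * names; the embedded BlockDriverAIOCB must be the first field so that
 * the aiocb_size bytes allocated by qemu_aio_get() can be used as both
 * types.
 *
 *     typedef struct MyAIOCB {
 *         BlockDriverAIOCB common;    // must come first
 *         int my_state;
 *     } MyAIOCB;
 *
 *     static void my_aio_cancel(BlockDriverAIOCB *acb)
 *     {
 *         MyAIOCB *my_acb = (MyAIOCB *)acb;
 *         // stop the in-flight request before returning
 *     }
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .cancel     = my_aio_cancel,
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     // ...submit the request; on completion:
 *     acb->common.cb(acb->common.opaque, 0);
 *     qemu_aio_release(acb);
 */
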
typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);

struct AioContext {
    GSource source;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Lock protecting the bottom-half list against concurrent adders and
     * deleters.
     */
    QemuMutex bh_lock;
    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used for aio_notify.  */
    EventNotifier notifier;

    /* GPollFDs for aio_poll() */
    GArray *pollfds;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event loop that can be waited on
 * synchronously.  They also provide bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(void);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

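/* A minimal lifecycle sketch, not part of the original header.  It assumes
 * aio_context_new() hands the caller the single reference of the newly
 * created context, as with any freshly created GSource:
 *
 *     AioContext *ctx = aio_context_new();
 *     // ...register handlers, schedule bottom halves, poll...
 *     aio_context_unref(ctx);    // drop the creation reference
 */
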
/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

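/* An illustrative sketch of the usual bottom-half pattern; my_bh_cb and
 * my_state are hypothetical names:
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         // runs as soon as possible in the context's dispatch loop
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state);
 *     qemu_bh_schedule(bh);    // wait-free, safe from other threads
 *     // ...
 *     qemu_bh_delete(bh);      // when the bottom half is no longer needed
 */
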
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because, for example, scheduling
 * a bottom half already calls it.
 */
void aio_notify(AioContext *ctx);

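/* Illustrative: publishing work from another thread and waking a thread
 * that may be blocked in aio_poll().  The request queue and its lock are
 * hypothetical:
 *
 *     qemu_mutex_lock(&s->lock);
 *     QSIMPLEQ_INSERT_TAIL(&s->requests, req, next);
 *     qemu_mutex_unlock(&s->lock);
 *     aio_notify(ctx);    // force aio_poll to re-examine pending events
 */
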
/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll must not be called concurrently
 * with itself.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only freed at the
 * end of a dispatch pass over the list, not immediately.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom-half callbacks.
 *
 * If there is no pending AIO operation or completion (bottom half),
 * return false.  If there are pending AIO operations or bottom halves,
 * return true.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure progress has been made
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

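/* Illustrative: a drain-style loop that dispatches events until some
 * hypothetical completion flag is set:
 *
 *     while (!s->done) {
 *         aio_poll(ctx, true);    // block until at least one event fires
 *     }
 *
 * With @blocking false, aio_poll only dispatches work that is already
 * pending and returns immediately.
 */
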
#ifdef CONFIG_POSIX
/* Register a file descriptor and associated callbacks.  Behaves very
 * similarly to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these
 * callbacks will be invoked when using qemu_aio_wait().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);
#endif

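/* Illustrative (POSIX only): registering a read handler for a file
 * descriptor; my_read_cb and my_state are hypothetical.  Passing NULL for
 * both handlers unregisters the descriptor:
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         // read from s->fd until it would block
 *     }
 *
 *     aio_set_fd_handler(ctx, fd, my_read_cb, NULL, my_state);
 *     // ...
 *     aio_set_fd_handler(ctx, fd, NULL, NULL, NULL);    // unregister
 */
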
/* Register an event notifier and associated callbacks.  Behaves very
 * similarly to event_notifier_set_handler.  Unlike event_notifier_set_handler,
 * these callbacks will be invoked when using qemu_aio_wait().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read);

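/* Illustrative: an EventNotifier lets any thread kick a callback that runs
 * in this context; my_notifier_cb is hypothetical:
 *
 *     static void my_notifier_cb(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         // ...handle the event...
 *     }
 *
 *     event_notifier_init(&s->e, 0);
 *     aio_set_event_notifier(ctx, &s->e, my_notifier_cb);
 *     // ...then, from any thread:
 *     event_notifier_set(&s->e);
 */
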
/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

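/* Illustrative: letting a GLib main loop drive this AioContext:
 *
 *     GSource *source = aio_get_g_source(ctx);
 *     g_source_attach(source, NULL);    // NULL = default GMainContext
 *     g_source_unref(source);           // the main loop now holds its own ref
 */
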
/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Functions to operate on the main QEMU AioContext.  */

bool qemu_aio_wait(void);
void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                 EventNotifierHandler *io_read);

#ifdef CONFIG_POSIX
void qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             void *opaque);
#endif

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

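/* Illustrative: arming a one-shot 100 ms timer on this context's realtime
 * clock; my_timer_cb and my_state are hypothetical:
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, my_state);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     // ...
 *     timer_del(t);
 *     timer_free(t);
 */
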
/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init(ts, ctx->tlg.tl[type], scale, cb, opaque);
}


#endif