/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/rfifolock.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

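/* Illustrative sketch (not part of the original header): how a block driver
 * typically defines an AIOCBInfo and allocates its AIOCB subclass with
 * qemu_aio_get.  MyAIOCB, my_aiocb_info and my_submit are hypothetical names.
 *
 *   typedef struct MyAIOCB {
 *       BlockAIOCB common;           // must be the first field
 *       int my_private_state;
 *   } MyAIOCB;
 *
 *   static const AIOCBInfo my_aiocb_info = {
 *       .aiocb_size = sizeof(MyAIOCB),
 *   };
 *
 *   static BlockAIOCB *my_submit(BlockDriverState *bs,
 *                                BlockCompletionFunc *cb, void *opaque)
 *   {
 *       MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *       acb->my_private_state = 0;
 *       return &acb->common;
 *   }
 */
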
typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);

struct AioContext {
    GSource source;

    /* Protects all fields from multi-threaded access */
    RFifoLock lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify.
     * Writes protected by lock or BQL, reads are lockless.
     */
    bool dispatching;

    /* Lock that protects concurrent bottom half adders against the deleter */
    QemuMutex bh_lock;

    /* Anchor of the list of bottom halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used for aio_notify. */
    EventNotifier notifier;

    /* GPollFDs for aio_poll() */
    GArray *pollfds;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;
};

/* Used internally to synchronize aio_poll against qemu_bh_schedule. */
void aio_set_dispatching(AioContext *ctx, bool dispatching);

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

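/* Illustrative sketch (not in the original header): creating a context.
 * Passing &error_abort assumes the caller cannot recover from failure; a
 * real caller may prefer a local Error * and its own error handling.
 *
 *   AioContext *ctx = aio_context_new(&error_abort);
 */
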
/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, a thread must have ownership when calling aio_poll().
 *
 * Note that multiple threads calling aio_poll() means timers, BHs, and
 * callbacks may be invoked from a different thread than they were registered
 * from.  Therefore, code must use AioContext acquire/release or use
 * fine-grained synchronization to protect shared state if other threads will
 * be accessing it simultaneously.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

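/* Illustrative sketch (not in the original header): a dedicated thread
 * driving a shared AioContext.  The running flag is hypothetical; it would
 * be cleared by whatever code asks the thread to stop.
 *
 *   static void *iothread_run(void *opaque)
 *   {
 *       AioContext *ctx = opaque;
 *
 *       while (atomic_read(&running)) {
 *           aio_context_acquire(ctx);
 *           aio_poll(ctx, true);     // block until at least one event
 *           aio_context_release(ctx);
 *       }
 *       return NULL;
 *   }
 */
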
/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

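/* Illustrative sketch (not in the original header): the usual bottom half
 * lifecycle.  A one-shot bottom half commonly deletes itself from its own
 * callback; MyState, my_bh_cb and do_deferred_work are hypothetical names.
 *
 *   static void my_bh_cb(void *opaque)
 *   {
 *       MyState *s = opaque;
 *
 *       do_deferred_work(s);         // hypothetical helper
 *       qemu_bh_delete(s->bh);       // one-shot: free the BH when done
 *   }
 *
 *   s->bh = aio_bh_new(ctx, my_bh_cb, s);
 *   qemu_bh_schedule(s->bh);         // safe from any thread
 */
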
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_wait to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_wait again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

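/* Illustrative sketch (not in the original header): kicking a blocked
 * aio_poll after publishing work from another thread.  The work queue and
 * its lock are hypothetical; the aio_notify call is the point here.
 *
 *   qemu_mutex_lock(&queue_lock);
 *   QSIMPLEQ_INSERT_TAIL(&work_queue, item, next);
 *   qemu_mutex_unlock(&queue_lock);
 *   aio_notify(ctx);                 // wake the event loop to see the item
 */
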
/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll must not be called concurrently with itself
 * for the same AioContext.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only freed once the
 * bottom half list is next walked.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work.  This can issue new pending
 * AIO operations as a result of executing I/O completion or bh callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

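/* Illustrative sketch (not in the original header): synchronously waiting
 * for one request to finish, the common "drain" idiom.  submit_request and
 * the done flag are hypothetical; the completion callback would set done.
 *
 *   submit_request(bs, &done);       // hypothetical, completion sets done
 *   while (!done) {
 *       aio_poll(ctx, true);         // run handlers until done flips
 *   }
 */
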
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);

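/* Illustrative sketch (not in the original header): watching a socket for
 * readability.  MyConn and read_ready are hypothetical; passing NULL for
 * both handlers is the usual way to unregister the descriptor.
 *
 *   static void read_ready(void *opaque)
 *   {
 *       MyConn *conn = opaque;
 *       ...                          // recv() without blocking
 *   }
 *
 *   aio_set_fd_handler(ctx, sockfd, read_ready, NULL, conn);
 *   ...
 *   aio_set_fd_handler(ctx, sockfd, NULL, NULL, NULL);   // unregister
 */
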
/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read);

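/* Illustrative sketch (not in the original header): cross-thread wakeups
 * through an EventNotifier.  notifier_ready is a hypothetical callback.
 *
 *   static void notifier_ready(EventNotifier *e)
 *   {
 *       event_notifier_test_and_clear(e);    // re-arm the notifier
 *       ...                                  // handle the wakeup
 *   }
 *
 *   event_notifier_init(&e, 0);
 *   aio_set_event_notifier(ctx, &e, notifier_ready);
 *   // another thread signals with: event_notifier_set(&e);
 */
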
/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

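/* Illustrative sketch (not in the original header): driving the AioContext
 * from a GLib main loop instead of aio_poll.  Assumes aio_get_g_source
 * returns a new reference that the caller owns.
 *
 *   GSource *src = aio_get_g_source(ctx);
 *   g_source_attach(src, g_main_context_default());
 *   g_source_unref(src);             // the main context keeps its own ref
 */
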
/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

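/* Illustrative sketch (not in the original header): arming a one-shot timer
 * on a context, assuming the timer_mod and qemu_clock_get_ms helpers from
 * qemu/timer.h.  timeout_cb is a hypothetical callback.
 *
 *   QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                timeout_cb, s);
 *   timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 5000);
 */
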
/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}

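/* Illustrative sketch (not in the original header): embedding the timer in
 * an existing structure, which is why aio_timer_init is preferred over
 * aio_timer_new.  MyDevice and poll_timer_cb are hypothetical names.
 *
 *   typedef struct MyDevice {
 *       QEMUTimer poll_timer;        // storage owned by the device
 *       ...
 *   } MyDevice;
 *
 *   aio_timer_init(ctx, &dev->poll_timer, QEMU_CLOCK_VIRTUAL, SCALE_NS,
 *                  poll_timer_cb, dev);
 */
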
/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

#endif