/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
13 | ||
14 | #ifndef QEMU_AIO_H | |
15 | #define QEMU_AIO_H | |
16 | ||
6a1751b7 | 17 | #include "qemu/typedefs.h" |
a76bab49 | 18 | #include "qemu-common.h" |
1de7afc9 PB |
19 | #include "qemu/queue.h" |
20 | #include "qemu/event_notifier.h" | |
dcc772e2 | 21 | #include "qemu/thread.h" |
98563fc3 | 22 | #include "qemu/rfifolock.h" |
dae21b98 | 23 | #include "qemu/timer.h" |
a76bab49 | 24 | |
typedef struct BlockDriverAIOCB BlockDriverAIOCB;
typedef void BlockDriverCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel)(BlockDriverAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockDriverAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockDriverCompletionFunc *cb;
    void *opaque;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque);
void qemu_aio_release(void *p);
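
/* Sketch of how a driver might use these (illustrative only; "MyAIOCB",
 * "my_cancel" and "my_aiocb_info" are hypothetical names, not part of this
 * API):
 *
 *     typedef struct MyAIOCB {
 *         BlockDriverAIOCB common;    // embedded first, so qemu_aio_get's
 *         int my_state;               // allocation can be cast to MyAIOCB
 *     } MyAIOCB;
 *
 *     static void my_cancel(BlockDriverAIOCB *acb)
 *     {
 *         // stop the in-flight request before the AIOCB goes away
 *     }
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .cancel     = my_cancel,
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     // ... when the request completes:
 *     acb->common.cb(acb->common.opaque, 0);
 *     qemu_aio_release(acb);
 */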
43 | ||
f627aab1 PB |
44 | typedef struct AioHandler AioHandler; |
45 | typedef void QEMUBHFunc(void *opaque); | |
46 | typedef void IOHandler(void *opaque); | |
47 | ||
6a1751b7 | 48 | struct AioContext { |
    GSource source;

    /* Protects all fields from multi-threaded access */
    RFifoLock lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify.
     * Writes protected by lock or BQL, reads are lockless.
     */
    bool dispatching;

    /* Lock protecting the bottom half list against concurrent adders
     * and deleters */
    QemuMutex bh_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used for aio_notify. */
    EventNotifier notifier;

    /* GPollFDs for aio_poll() */
    GArray *pollfds;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;
};

/* Used internally to synchronize aio_poll against qemu_bh_schedule. */
void aio_set_dispatching(AioContext *ctx, bool dispatching);

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContext provides a mini event loop that can be waited on synchronously.
 * It also provides bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(void);
103 | ||
e3713e00 PB |
104 | /** |
105 | * aio_context_ref: | |
106 | * @ctx: The AioContext to operate on. | |
107 | * | |
108 | * Add a reference to an AioContext. | |
109 | */ | |
110 | void aio_context_ref(AioContext *ctx); | |
111 | ||
112 | /** | |
113 | * aio_context_unref: | |
114 | * @ctx: The AioContext to operate on. | |
115 | * | |
116 | * Drop a reference to an AioContext. | |
117 | */ | |
118 | void aio_context_unref(AioContext *ctx); | |
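
/* A minimal sketch of the allocation and reference-counting lifecycle:
 *
 *     AioContext *ctx = aio_context_new();    // starts with one reference
 *     aio_context_ref(ctx);                   // share with a second user
 *     aio_context_unref(ctx);                 // second user is done
 *     aio_context_unref(ctx);                 // last unref frees the context
 */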
119 | ||
98563fc3 SH |
120 | /* Take ownership of the AioContext. If the AioContext will be shared between |
121 | * threads, a thread must have ownership when calling aio_poll(). | |
122 | * | |
123 | * Note that multiple threads calling aio_poll() means timers, BHs, and | |
124 | * callbacks may be invoked from a different thread than they were registered | |
125 | * from. Therefore, code must use AioContext acquire/release or use | |
126 | * fine-grained synchronization to protect shared state if other threads will | |
127 | * be accessing it simultaneously. | |
128 | */ | |
129 | void aio_context_acquire(AioContext *ctx); | |
130 | ||
131 | /* Relinquish ownership of the AioContext. */ | |
132 | void aio_context_release(AioContext *ctx); | |
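
/* A sketch of the ownership pattern described above, for a thread that wants
 * to run the context's event loop:
 *
 *     aio_context_acquire(ctx);
 *     while (aio_poll(ctx, true)) {
 *         // timers, bottom halves and completion callbacks run here
 *     }
 *     aio_context_release(ctx);
 */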
133 | ||
f627aab1 PB |
134 | /** |
135 | * aio_bh_new: Allocate a new bottom half structure. | |
136 | * | |
137 | * Bottom halves are lightweight callbacks whose invocation is guaranteed | |
138 | * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure | |
139 | * is opaque and must be allocated prior to its use. | |
140 | */ | |
141 | QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque); | |
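
/* Illustrative sketch of the bottom half lifecycle; "my_bh_cb" and "my_state"
 * are hypothetical:
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         // deferred work runs here, in the context's event loop
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state);
 *     qemu_bh_schedule(bh);    // wait-free; safe even from signal handlers
 *     // later, once the bottom half is no longer needed:
 *     qemu_bh_delete(bh);
 */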
142 | ||
2f4dc3c1 PB |
143 | /** |
144 | * aio_notify: Force processing of pending events. | |
145 | * | |
146 | * Similar to signaling a condition variable, aio_notify forces | |
147 | * aio_wait to exit, so that the next call will re-examine pending events. | |
148 | * The caller of aio_notify will usually call aio_wait again very soon, | |
149 | * or go through another iteration of the GLib main loop. Hence, aio_notify | |
150 | * also has the side effect of recalculating the sets of file descriptors | |
151 | * that the main loop waits for. | |
152 | * | |
153 | * Calling aio_notify is rarely necessary, because for example scheduling | |
154 | * a bottom half calls it already. | |
155 | */ | |
156 | void aio_notify(AioContext *ctx); | |
157 | ||
f627aab1 PB |
158 | /** |
159 | * aio_bh_poll: Poll bottom halves for an AioContext. | |
160 | * | |
161 | * These are internal functions used by the QEMU main loop. | |
dcc772e2 LPF |
162 | * And notice that multiple occurrences of aio_bh_poll cannot |
163 | * be called concurrently | |
f627aab1 PB |
164 | */ |
165 | int aio_bh_poll(AioContext *ctx); | |

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);
180 | ||
181 | /** | |
182 | * qemu_bh_cancel: Cancel execution of a bottom half. | |
183 | * | |
184 | * Canceling execution of a bottom half undoes the effect of calls to | |
185 | * qemu_bh_schedule without freeing its resources yet. While cancellation | |
186 | * itself is also wait-free and thread-safe, it can of course race with the | |
187 | * loop that executes bottom halves unless you are holding the iothread | |
188 | * mutex. This makes it mostly useless if you are not holding the mutex. | |
189 | * | |
190 | * @bh: The bottom half to be canceled. | |
191 | */ | |
192 | void qemu_bh_cancel(QEMUBH *bh); | |
193 | ||
194 | /** | |
195 | *qemu_bh_delete: Cancel execution of a bottom half and free its resources. | |
196 | * | |
197 | * Deleting a bottom half frees the memory that was allocated for it by | |
198 | * qemu_bh_new. It also implies canceling the bottom half if it was | |
199 | * scheduled. | |
dcc772e2 LPF |
200 | * This func is async. The bottom half will do the delete action at the finial |
201 | * end. | |
f627aab1 PB |
202 | * |
203 | * @bh: The bottom half to be deleted. | |
204 | */ | |
205 | void qemu_bh_delete(QEMUBH *bh); | |
206 | ||
cd9ba1eb PB |
207 | /* Return whether there are any pending callbacks from the GSource |
208 | * attached to the AioContext. | |
209 | * | |
210 | * This is used internally in the implementation of the GSource. | |
211 | */ | |
212 | bool aio_pending(AioContext *ctx); | |
213 | ||
e4c7e2d1 PB |
214 | /* Dispatch any pending callbacks from the GSource attached to the AioContext. |
215 | * | |
216 | * This is used internally in the implementation of the GSource. | |
217 | */ | |
218 | bool aio_dispatch(AioContext *ctx); | |
219 | ||
7c0628b2 PB |
220 | /* Progress in completing AIO work to occur. This can issue new pending |
221 | * aio as a result of executing I/O completion or bh callbacks. | |
bcdc1857 | 222 | * |
acfb23ad PB |
223 | * Return whether any progress was made by executing AIO or bottom half |
224 | * handlers. If @blocking == true, this should always be true except | |
225 | * if someone called aio_notify. | |
7c0628b2 PB |
226 | * |
227 | * If there are no pending bottom halves, but there are pending AIO | |
228 | * operations, it may not be possible to make any progress without | |
229 | * blocking. If @blocking is true, this function will wait until one | |
230 | * or more AIO events have completed, to ensure something has moved | |
231 | * before returning. | |
7c0628b2 PB |
232 | */ |
233 | bool aio_poll(AioContext *ctx, bool blocking); | |
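
/* A common usage sketch; the completion flag "done" is hypothetical:
 *
 *     // block until a request signals completion
 *     while (!done) {
 *         aio_poll(ctx, true);
 *     }
 *
 *     // or: handle whatever is ready without blocking
 *     while (aio_poll(ctx, false)) {
 *         // loop while progress is still being made
 *     }
 */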

#ifdef CONFIG_POSIX
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);
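
/* Sketch of registering (and later removing) a read handler; "my_read_ready",
 * "fd" and "my_state" are hypothetical:
 *
 *     static void my_read_ready(void *opaque)
 *     {
 *         // invoked from aio_poll() whenever fd is readable
 *     }
 *
 *     aio_set_fd_handler(ctx, fd, my_read_ready, NULL, my_state);
 *     // passing NULL callbacks unregisters the fd again:
 *     aio_set_fd_handler(ctx, fd, NULL, NULL, NULL);
 */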
#endif

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read);
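
/* Sketch of wiring up an EventNotifier; "my_notifier_read" is hypothetical:
 *
 *     static void my_notifier_read(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);    // re-arm before handling
 *         // handle the event
 *     }
 *
 *     aio_set_event_notifier(ctx, &notifier, my_notifier_read);
 */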

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);
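
/* Sketch of attaching the context to a GLib main loop; the returned source
 * carries a reference that the caller drops after attaching:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);
 */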
265 | ||
9b34277d SH |
266 | /* Return the ThreadPool bound to this AioContext */ |
267 | struct ThreadPool *aio_get_thread_pool(AioContext *ctx); | |
268 | ||
/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}
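
/* Sketch of arming a timer on this context; "my_timer_cb" and "my_state" are
 * hypothetical:
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, my_state);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 */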
291 | ||
292 | /** | |
293 | * aio_timer_init: | |
294 | * @ctx: the aio context | |
295 | * @ts: the timer | |
296 | * @type: the clock type | |
297 | * @scale: the scale | |
298 | * @cb: the callback to call on timer expiry | |
299 | * @opaque: the opaque pointer to pass to the callback | |
300 | * | |
301 | * Initialise a new timer attached to the context @ctx. | |
302 | * The caller is responsible for memory allocation. | |
303 | */ | |
304 | static inline void aio_timer_init(AioContext *ctx, | |
305 | QEMUTimer *ts, QEMUClockType type, | |
306 | int scale, | |
307 | QEMUTimerCB *cb, void *opaque) | |
308 | { | |
309 | timer_init(ts, ctx->tlg.tl[type], scale, cb, opaque); | |
310 | } | |
311 | ||
845ca10d PB |
312 | /** |
313 | * aio_compute_timeout: | |
314 | * @ctx: the aio context | |
315 | * | |
316 | * Compute the timeout that a blocking aio_poll should use. | |
317 | */ | |
318 | int64_t aio_compute_timeout(AioContext *ctx); | |
319 | ||
a76bab49 | 320 | #endif |