/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/rfifolock.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);

struct AioContext {
    GSource source;

    /* Protects all fields from multi-threaded access */
    RFifoLock lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify.
     * Writes protected by lock or BQL, reads are lockless.
     */
    bool dispatching;

    /* Lock that protects concurrent adders and deleters of bottom halves */
    QemuMutex bh_lock;
73 | ||
74 | /* Anchor of the list of Bottom Halves belonging to the context */ | |
75 | struct QEMUBH *first_bh; | |
76 | ||
77 | /* A simple lock used to protect the first_bh list, and ensure that | |
78 | * no callbacks are removed while we're walking and dispatching callbacks. | |
79 | */ | |
80 | int walking_bh; | |
81 | ||
82 | /* Used for aio_notify. */ | |
83 | EventNotifier notifier; | |
84 | ||
85 | /* GPollFDs for aio_poll() */ | |
86 | GArray *pollfds; | |
87 | ||
88 | /* Thread pool for performing work and receiving completion callbacks */ | |
89 | struct ThreadPool *thread_pool; | |
90 | ||
91 | /* TimerLists for calling timers - one per clock type */ | |
92 | QEMUTimerListGroup tlg; | |
93 | }; | |
94 | ||
95 | /* Used internally to synchronize aio_poll against qemu_bh_schedule. */ | |
96 | void aio_set_dispatching(AioContext *ctx, bool dispatching); | |
97 | ||
/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, a thread must have ownership when calling aio_poll().
 *
 * Note that multiple threads calling aio_poll() means timers, BHs, and
 * callbacks may be invoked from a different thread than they were registered
 * from.  Therefore, code must use AioContext acquire/release or use
 * fine-grained synchronization to protect shared state if other threads will
 * be accessing it simultaneously.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll must not be called concurrently from
 * multiple threads.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the memory is only freed once the event
 * loop has finished with the bottom half.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * when someone has called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure progress has been made
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init.  Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

#endif