/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);
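
/* A minimal usage sketch (not part of this header's API contract): a block
 * driver typically embeds BlockAIOCB as the first field of its own AIOCB
 * type, describes it with an AIOCBInfo, and allocates it with qemu_aio_get.
 * MyAIOCB and my_aiocb_info below are hypothetical names.
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;      // must be first so casts work
 *         int my_state;
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     ...
 *     acb->common.cb(acb->common.opaque, 0);   // report completion
 *     qemu_aio_unref(acb);                     // drop the initial reference
 */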

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access. */
    QemuRecMutex lock;

    /* The list of registered AIO handlers. Protected by ctx->list_lock. */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives. If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped. If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch. It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock that protects concurrent addition and removal of QEMUBHs and
     * AioHandlers, and ensures that no callbacks are removed while we're
     * walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls. When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events. False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way. For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /* State for native Linux AIO. Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif

    /* TimerLists for calling timers - one per clock type. Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;
    bool epoll_enabled;
    bool epoll_available;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContexts provide a mini event loop that can be waited on synchronously.
 * They also provide bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);
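
/* Usage sketch (illustrative; local_err is a hypothetical variable name,
 * following the conventional Error **errp pattern):
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         error_report_err(local_err);
 *         ...
 *     }
 *     ...
 *     aio_context_unref(ctx);    // release when done
 */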

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext. If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll(). Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
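
/* A sketch of the acquire/release pattern around cross-thread access
 * (the AioContext here is assumed to belong to another thread, e.g. an
 * IOThread; bdrv_do_something is a hypothetical operation):
 *
 *     aio_context_acquire(ctx);
 *     bdrv_do_something(bs, ...);    // touch state owned by ctx's
 *                                    // home thread
 *     aio_context_release(ctx);
 */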

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
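
/* A minimal bottom-half lifecycle sketch (my_cb and s are hypothetical
 * placeholders). For fire-and-forget callbacks, aio_bh_schedule_oneshot
 * above avoids explicit allocation and deletion:
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, s);
 *     qemu_bh_schedule(bh);       // my_cb runs in ctx's event loop
 *     ...
 *     qemu_bh_delete(bh);         // cancel if pending, then free
 *
 *     // equivalent one-shot form, no QEMUBH to manage:
 *     aio_bh_schedule_oneshot(ctx, my_cb, s);
 */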

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop. Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration(). Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting. This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified bottom half.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that multiple invocations of aio_bh_poll must not
 * run concurrently.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked. This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet. While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex. This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new. It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only actually freed
 * once the event loop is done with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work. This can issue new pending
 * aio as a result of executing I/O completion or bh callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers. If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking. If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure progress is made
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
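
/* For example, a synchronous wait loop (sketch; my_done is a hypothetical
 * completion flag set by a callback):
 *
 *     while (!my_done) {
 *         aio_poll(ctx, true);    // block until some event makes progress
 *     }
 */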

/* Register a file descriptor and associated callbacks. Behaves very similarly
 * to qemu_set_fd_handler. Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);
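
/* Registration sketch (my_read_cb, my_poll_cb and s are hypothetical).
 * Passing NULL for every handler unregisters the file descriptor again:
 *
 *     aio_set_fd_handler(ctx, fd, true,    // is_external
 *                        my_read_cb,       // io_read
 *                        NULL,             // io_write: not needed
 *                        my_poll_cb,       // io_poll
 *                        s);
 *     ...
 *     aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL, NULL);
 */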

/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler. Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks. Behaves very similarly
 * to event_notifier_set_handler. Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);
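
/* Sketch of notifier registration (my_notifier_cb is hypothetical); pass
 * NULL handlers to unregister:
 *
 *     EventNotifier e;
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, false, my_notifier_cb, NULL);
 *     ...
 *     aio_set_event_notifier(ctx, &e, false, NULL, NULL);
 *     event_notifier_cleanup(&e);
 */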

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier. Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);
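
/* For example, to drive this AioContext from a GLib main loop (a sketch;
 * the returned GSource carries a reference, so drop it after attaching):
 *
 *     GSource *source = aio_get_g_source(ctx);
 *     g_source_attach(source, NULL);    // NULL: default GMainContext
 *     g_source_unref(source);
 */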

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Set up the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more QEMU_TIMER_ATTR_<id> values OR'ed together
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use those unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}
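
/* A usage sketch (my_timer_cb and s are hypothetical): arm a timer on
 * @ctx's QEMU_CLOCK_REALTIME timer list to fire 100ms from now:
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME,
 *                                  SCALE_MS, my_timer_cb, s);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(t);
 *     timer_free(t);
 */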

/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one or more QEMU_TIMER_ATTR_<id> values OR'ed together
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}
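
/* The init variants suit a timer embedded in a caller-allocated structure
 * (sketch; MyDevice and my_timer_cb are hypothetical):
 *
 *     typedef struct MyDevice {
 *         QEMUTimer timer;    // storage owned by MyDevice
 *         ...
 *     } MyDevice;
 *
 *     aio_timer_init(ctx, &dev->timer, QEMU_CLOCK_VIRTUAL,
 *                    SCALE_NS, my_timer_cb, dev);
 */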

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = atomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}
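
/* Disable/enable nest, so the typical pattern brackets a section during
 * which external event sources must stay quiesced (a sketch):
 *
 *     aio_disable_external(ctx);
 *     ...                          // e.g. complete in-flight requests
 *     aio_enable_external(ctx);    // kicks the event loop when the
 *                                  // disable count drops back to 0
 */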

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check whether a node with the given @is_external flag may be polled by
 * @ctx at this moment. Returns true if polling it is allowed.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active. In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context. The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);
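
/* A common wake-up pattern (sketch; req is a hypothetical request object
 * whose coroutine field is set before yielding):
 *
 *     // in the coroutine:
 *     req->co = qemu_coroutine_self();
 *     qemu_coroutine_yield();          // wait for completion
 *
 *     // in the completion handler, possibly in another thread:
 *     aio_co_wake(req->co);            // resume on the coroutine's context
 */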

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext. If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * in_aio_context_home_thread:
 * @ctx: the aio context
 *
 * Return whether we are running in the thread that normally runs @ctx. Note
 * that acquiring/releasing ctx does not affect the outcome, each AioContext
 * still only has one home thread that is responsible for running it.
 */
static inline bool in_aio_context_home_thread(AioContext *ctx)
{
    return ctx == qemu_get_current_aio_context();
}

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);
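
/* For example (a sketch; the values are arbitrary, not recommendations):
 * allow up to 32us of busy polling, with growth and shrink factors of 2
 * for the adaptive polling window:
 *
 *     Error *local_err = NULL;
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &local_err);
 */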

#endif