/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};

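/*
 * A minimal usage sketch (illustrative only, not part of this file): a caller
 * creates a BH once, schedules it whenever work is pending, and deletes it
 * when done.  "my_cb", "my_state" and "MyState" are hypothetical names.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;            // hypothetical state type
 *         ...                             // deferred work, runs in ctx's thread
 *     }
 *
 *     QEMUBH *bh = aio_bh_new_full(ctx, my_cb, my_state, "my-bh");
 *     qemu_bh_schedule(bh);               // sets BH_SCHEDULED and wakes the loop
 *     ...
 *     qemu_bh_delete(bh);                 // actual g_free() happens in aio_bh_poll()
 */
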
/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * The memory barrier implicit in qatomic_fetch_or makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in aio_bh_poll().
     * 2. ctx is loaded before the callback has a chance to execute and bh
     *    could be freed.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
    if (!(old_flags & BH_PENDING)) {
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * The qatomic_fetch_and is paired with aio_bh_enqueue().  The implicit
     * memory barrier ensures that the callback sees all writes done by the
     * scheduling thread.  It also ensures that the scheduling thread sees the
     * cleared flag before bh->cb has run, and thus will call aio_notify again
     * if necessary.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Multiple invocations of aio_bh_poll() must not run concurrently. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous: it only clears the scheduled flag. */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/*
 * This function is asynchronous.  The bottom half is actually deleted later,
 * by aio_bh_poll() or aio_ctx_finalize().
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}
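
/*
 * Worked example (illustrative): with one idle BH pending and no non-idle BH,
 * aio_compute_bh_timeout() returns 10000000 ns.  If the nearest timer deadline
 * is 5000000 ns, qemu_soonest_timeout(10000000, 5000000) picks 5000000, so the
 * event loop blocks for at most 5 ms.
 */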

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}
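
/*
 * Illustrative only: how a caller might drive this AioContext from a GLib
 * main loop (g_main_context_default() is plain GLib, not a QEMU API):
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);   // the main context now holds its own reference
 */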

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. bh->flags before writing ctx->notified.  Pairs with smp_mb
     * in aio_notify_accept.
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified before reading ctx->notify_me.  Pairs
     * with smp_mb in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Write ctx->notified before reading e.g. bh->flags.  Pairs with smp_wmb
     * in aio_notify.
     */
    smp_mb();
}
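
/*
 * Illustrative pairing (a sketch of what aio_ctx_prepare()/aio_ctx_check()
 * above and aio_poll() do; not a new API).  The notify_me/notified protocol
 * only works when the polling side keeps this order:
 *
 *     qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);
 *     smp_mb();                                // then compute the timeout
 *     timeout = aio_compute_timeout(ctx);
 *     ...block in poll()/ppoll() for up to timeout...
 *     qatomic_store_release(&ctx->notify_me,
 *                           qatomic_read(&ctx->notify_me) & ~1);
 *     aio_notify_accept(ctx);                  // then re-check BH flags/timers
 */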

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
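
/*
 * Illustrative creation/teardown sketch (the error handling policy is the
 * caller's choice; &error_fatal is just one option):
 *
 *     AioContext *ctx = aio_context_new(&error_fatal);
 *     ...
 *     aio_context_unref(ctx);   // the GSource is finalized with the last ref
 */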

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}
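
/*
 * A minimal hand-off sketch (illustrative; "iothread_ctx" is a hypothetical
 * AioContext owned by another thread):
 *
 *     static void coroutine_fn my_co(void *opaque)
 *     {
 *         aio_co_reschedule_self(iothread_ctx);            // continue there
 *         ...
 *         aio_co_reschedule_self(qemu_get_aio_context());  // and move back
 *     }
 *
 * From outside coroutine context, a yielded coroutine is (re)entered with
 * aio_co_wake(co) or aio_co_schedule(ctx, co) instead.
 */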

void aio_co_wake(struct Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, struct Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}
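
/*
 * Illustrative: an event-loop thread (e.g. an IOThread) associates itself with
 * its AioContext once at startup, before it starts polling ("my_ctx" and
 * "stopping" are hypothetical):
 *
 *     qemu_set_current_aio_context(my_ctx);
 *     while (!stopping) {
 *         aio_poll(my_ctx, true);
 *     }
 */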

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
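
/*
 * Illustrative call (values are arbitrary for the example): limit this
 * context's worker pool to at most 4 threads.
 *
 *     Error *local_err = NULL;
 *     aio_context_set_thread_pool_params(ctx, 0, 4, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */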