/*
 * Coroutine tests
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "qemu/lockable.h"

/*
 * Check that qemu_in_coroutine() works
 */

static void coroutine_fn verify_in_coroutine(void *opaque)
{
    g_assert(qemu_in_coroutine());
}

static void test_in_coroutine(void)
{
    Coroutine *coroutine;

    g_assert(!qemu_in_coroutine());

    coroutine = qemu_coroutine_create(verify_in_coroutine, NULL);
    qemu_coroutine_enter(coroutine);
}

/*
 * Check that qemu_coroutine_self() works
 */

static void coroutine_fn verify_self(void *opaque)
{
    Coroutine **p_co = opaque;
    g_assert(qemu_coroutine_self() == *p_co);
}

static void test_self(void)
{
    Coroutine *coroutine;

    coroutine = qemu_coroutine_create(verify_self, &coroutine);
    qemu_coroutine_enter(coroutine);
}

/*
 * Check that qemu_coroutine_entered() works
 */

static void coroutine_fn verify_entered_step_2(void *opaque)
{
    Coroutine *caller = (Coroutine *)opaque;

    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
    qemu_coroutine_yield();

    /* Once more to check it still works after yielding */
    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
}

static void coroutine_fn verify_entered_step_1(void *opaque)
{
    Coroutine *self = qemu_coroutine_self();
    Coroutine *coroutine;

    g_assert(qemu_coroutine_entered(self));

    coroutine = qemu_coroutine_create(verify_entered_step_2, self);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
}

static void test_entered(void)
{
    Coroutine *coroutine;

    coroutine = qemu_coroutine_create(verify_entered_step_1, NULL);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
}

/*
 * Check that coroutines may nest multiple levels
 */

typedef struct {
    unsigned int n_enter;   /* num coroutines entered */
    unsigned int n_return;  /* num coroutines returned */
    unsigned int max;       /* maximum level of nesting */
} NestData;

static void coroutine_fn nest(void *opaque)
{
    NestData *nd = opaque;

    nd->n_enter++;

    if (nd->n_enter < nd->max) {
        Coroutine *child;

        child = qemu_coroutine_create(nest, nd);
        qemu_coroutine_enter(child);
    }

    nd->n_return++;
}

static void test_nesting(void)
{
    Coroutine *root;
    NestData nd = {
        .n_enter  = 0,
        .n_return = 0,
        .max      = 128,
    };

    root = qemu_coroutine_create(nest, &nd);
    qemu_coroutine_enter(root);

    /* Must enter and return from max nesting level */
    g_assert_cmpint(nd.n_enter, ==, nd.max);
    g_assert_cmpint(nd.n_return, ==, nd.max);
}

/*
 * Check that yield/enter transfer control correctly
 */

static void coroutine_fn yield_5_times(void *opaque)
{
    bool *done = opaque;
    int i;

    for (i = 0; i < 5; i++) {
        qemu_coroutine_yield();
    }
    *done = true;
}

static void test_yield(void)
{
    Coroutine *coroutine;
    bool done = false;
    int i = -1; /* one extra time to return from coroutine */

    coroutine = qemu_coroutine_create(yield_5_times, &done);
    while (!done) {
        qemu_coroutine_enter(coroutine);
        i++;
    }
    g_assert_cmpint(i, ==, 5); /* coroutine must yield 5 times */
}

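/*
 * Check that freed coroutine memory is not accessed after the coroutine
 * terminates (relies on the coroutine freelist; see main() below)
 */
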
static void coroutine_fn c2_fn(void *opaque)
{
    qemu_coroutine_yield();
}

static void coroutine_fn c1_fn(void *opaque)
{
    Coroutine *c2 = opaque;
    qemu_coroutine_enter(c2);
}

static void test_no_dangling_access(void)
{
    Coroutine *c1;
    Coroutine *c2;
    Coroutine tmp;

    c2 = qemu_coroutine_create(c2_fn, NULL);
    c1 = qemu_coroutine_create(c1_fn, c2);

    qemu_coroutine_enter(c1);

    /* c1 shouldn't be used any more now; make sure we segfault if it is */
    tmp = *c1;
    memset(c1, 0xff, sizeof(Coroutine));
    qemu_coroutine_enter(c2);

    /* Must restore the coroutine now to avoid corrupted pool */
    *c1 = tmp;
}

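/*
 * Check that CoMutex and QemuLockable provide mutual exclusion: `locked`
 * must never already be set when a coroutine acquires the lock, and `done`
 * counts the coroutines that have released it.
 */
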
static bool locked;
static int done;

static void coroutine_fn mutex_fn(void *opaque)
{
    CoMutex *m = opaque;
    qemu_co_mutex_lock(m);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_co_mutex_unlock(m);
    done++;
}

static void coroutine_fn lockable_fn(void *opaque)
{
    QemuLockable *x = opaque;
    qemu_lockable_lock(x);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_lockable_unlock(x);
    done++;
}

static void do_test_co_mutex(CoroutineEntry *entry, void *opaque)
{
    Coroutine *c1 = qemu_coroutine_create(entry, opaque);
    Coroutine *c2 = qemu_coroutine_create(entry, opaque);

    done = 0;
    qemu_coroutine_enter(c1);
    g_assert(locked);
    qemu_coroutine_enter(c2);

    /* Unlock queues c2.  It is then started automatically when c1 yields or
     * terminates.
     */
    qemu_coroutine_enter(c1);
    g_assert_cmpint(done, ==, 1);
    g_assert(locked);

    qemu_coroutine_enter(c2);
    g_assert_cmpint(done, ==, 2);
    g_assert(!locked);
}

static void test_co_mutex(void)
{
    CoMutex m;

    qemu_co_mutex_init(&m);
    do_test_co_mutex(mutex_fn, &m);
}

static void test_co_mutex_lockable(void)
{
    CoMutex m;
    CoMutex *null_pointer = NULL;

    qemu_co_mutex_init(&m);
    do_test_co_mutex(lockable_fn, QEMU_MAKE_LOCKABLE(&m));

    g_assert(QEMU_MAKE_LOCKABLE(null_pointer) == NULL);
}

static CoRwlock rwlock;

/* Test that readers are properly sent back to the queue when upgrading,
 * even if they are the sole readers.  The test scenario is as follows:
 *
 * | c1           | c2         |
 * |--------------+------------+
 * | rdlock       |            |
 * | yield        |            |
 * |              | wrlock     |
 * |              | <queued>   |
 * | upgrade      |            |
 * | <queued>     | <dequeued> |
 * |              | unlock     |
 * | <dequeued>   |            |
 * | unlock       |            |
 */

static void coroutine_fn rwlock_yield_upgrade(void *opaque)
{
    qemu_co_rwlock_rdlock(&rwlock);
    qemu_coroutine_yield();

    qemu_co_rwlock_upgrade(&rwlock);
    qemu_co_rwlock_unlock(&rwlock);

    *(bool *)opaque = true;
}

static void coroutine_fn rwlock_wrlock_yield(void *opaque)
{
    qemu_co_rwlock_wrlock(&rwlock);
    qemu_coroutine_yield();

    qemu_co_rwlock_unlock(&rwlock);
    *(bool *)opaque = true;
}

static void test_co_rwlock_upgrade(void)
{
    bool c1_done = false;
    bool c2_done = false;
    Coroutine *c1, *c2;

    qemu_co_rwlock_init(&rwlock);
    c1 = qemu_coroutine_create(rwlock_yield_upgrade, &c1_done);
    c2 = qemu_coroutine_create(rwlock_wrlock_yield, &c2_done);

    qemu_coroutine_enter(c1);
    qemu_coroutine_enter(c2);

    /* c1 now should go to sleep. */
    qemu_coroutine_enter(c1);
    g_assert(!c1_done);

    qemu_coroutine_enter(c2);
    g_assert(c1_done);
    g_assert(c2_done);
}

static void coroutine_fn rwlock_rdlock_yield(void *opaque)
{
    qemu_co_rwlock_rdlock(&rwlock);
    qemu_coroutine_yield();

    qemu_co_rwlock_unlock(&rwlock);
    qemu_coroutine_yield();

    *(bool *)opaque = true;
}

static void coroutine_fn rwlock_wrlock_downgrade(void *opaque)
{
    qemu_co_rwlock_wrlock(&rwlock);

    qemu_co_rwlock_downgrade(&rwlock);
    qemu_co_rwlock_unlock(&rwlock);
    *(bool *)opaque = true;
}

static void coroutine_fn rwlock_rdlock(void *opaque)
{
    qemu_co_rwlock_rdlock(&rwlock);

    qemu_co_rwlock_unlock(&rwlock);
    *(bool *)opaque = true;
}

static void coroutine_fn rwlock_wrlock(void *opaque)
{
    qemu_co_rwlock_wrlock(&rwlock);

    qemu_co_rwlock_unlock(&rwlock);
    *(bool *)opaque = true;
}

/*
 * Check that downgrading a reader-writer lock does not cause a hang.
 *
 * Four coroutines are used to produce a situation where there are
 * both reader and writer hopefuls waiting to acquire an rwlock that
 * is held by a reader.
 *
 * The correct sequence of operations we aim to provoke can be
 * represented as:
 *
 * | c1     | c2         | c3         | c4         |
 * |--------+------------+------------+------------|
 * | rdlock |            |            |            |
 * | yield  |            |            |            |
 * |        | wrlock     |            |            |
 * |        | <queued>   |            |            |
 * |        |            | rdlock     |            |
 * |        |            | <queued>   |            |
 * |        |            |            | wrlock     |
 * |        |            |            | <queued>   |
 * | unlock |            |            |            |
 * | yield  |            |            |            |
 * |        | <dequeued> |            |            |
 * |        | downgrade  |            |            |
 * |        |            | <dequeued> |            |
 * |        |            | unlock     |            |
 * |        | ...        |            |            |
 * |        | unlock     |            |            |
 * |        |            |            | <dequeued> |
 * |        |            |            | unlock     |
 */
static void test_co_rwlock_downgrade(void)
{
    bool c1_done = false;
    bool c2_done = false;
    bool c3_done = false;
    bool c4_done = false;
    Coroutine *c1, *c2, *c3, *c4;

    qemu_co_rwlock_init(&rwlock);

    c1 = qemu_coroutine_create(rwlock_rdlock_yield, &c1_done);
    c2 = qemu_coroutine_create(rwlock_wrlock_downgrade, &c2_done);
    c3 = qemu_coroutine_create(rwlock_rdlock, &c3_done);
    c4 = qemu_coroutine_create(rwlock_wrlock, &c4_done);

    qemu_coroutine_enter(c1);
    qemu_coroutine_enter(c2);
    qemu_coroutine_enter(c3);
    qemu_coroutine_enter(c4);

    qemu_coroutine_enter(c1);

    g_assert(c2_done);
    g_assert(c3_done);
    g_assert(c4_done);

    qemu_coroutine_enter(c1);

    g_assert(c1_done);
}

/*
 * Check that creation, enter, and return work
 */

static void coroutine_fn set_and_exit(void *opaque)
{
    bool *done = opaque;

    *done = true;
}

static void test_lifecycle(void)
{
    Coroutine *coroutine;
    bool done = false;

    /* Create, enter, and return from coroutine */
    coroutine = qemu_coroutine_create(set_and_exit, &done);
    qemu_coroutine_enter(coroutine);
    g_assert(done); /* expect done to be true (first time) */

    /* Repeat to check that no state affects this test */
    done = false;
    coroutine = qemu_coroutine_create(set_and_exit, &done);
    qemu_coroutine_enter(coroutine);
    g_assert(done); /* expect done to be true (second time) */
}

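/*
 * Record the interleaving of the caller (func 1) and the coroutine (func 2)
 * so that test_order can verify the exact sequence of events.
 */
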
#define RECORD_SIZE 10 /* Leave some room for expansion */
struct coroutine_position {
    int func;
    int state;
};
static struct coroutine_position records[RECORD_SIZE];
static unsigned record_pos;

static void record_push(int func, int state)
{
    struct coroutine_position *cp = &records[record_pos++];
    g_assert_cmpint(record_pos, <, RECORD_SIZE);
    cp->func = func;
    cp->state = state;
}

static void coroutine_fn co_order_test(void *opaque)
{
    record_push(2, 1);
    g_assert(qemu_in_coroutine());
    qemu_coroutine_yield();
    record_push(2, 2);
    g_assert(qemu_in_coroutine());
}

static void do_order_test(void)
{
    Coroutine *co;

    co = qemu_coroutine_create(co_order_test, NULL);
    record_push(1, 1);
    qemu_coroutine_enter(co);
    record_push(1, 2);
    g_assert(!qemu_in_coroutine());
    qemu_coroutine_enter(co);
    record_push(1, 3);
    g_assert(!qemu_in_coroutine());
}

static void test_order(void)
{
    int i;
    const struct coroutine_position expected_pos[] = {
        {1, 1,}, {2, 1}, {1, 2}, {2, 2}, {1, 3}
    };

    do_order_test();
    g_assert_cmpint(record_pos, ==, 5);
    for (i = 0; i < record_pos; i++) {
        g_assert_cmpint(records[i].func , ==, expected_pos[i].func );
        g_assert_cmpint(records[i].state, ==, expected_pos[i].state);
    }
}

/*
 * Lifecycle benchmark
 */

static void coroutine_fn empty_coroutine(void *opaque)
{
}

static void perf_lifecycle(void)
{
    Coroutine *coroutine;
    unsigned int i, max;
    double duration;

    max = 1000000;

    g_test_timer_start();
    for (i = 0; i < max; i++) {
        coroutine = qemu_coroutine_create(empty_coroutine, NULL);
        qemu_coroutine_enter(coroutine);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Lifecycle %u iterations: %f s", max, duration);
}

static void perf_nesting(void)
{
    unsigned int i, maxcycles, maxnesting;
    double duration;
    Coroutine *root;

    maxcycles = 10000;
    maxnesting = 1000;

    g_test_timer_start();
    for (i = 0; i < maxcycles; i++) {
        NestData nd = {
            .n_enter  = 0,
            .n_return = 0,
            .max      = maxnesting,
        };
        root = qemu_coroutine_create(nest, &nd);
        qemu_coroutine_enter(root);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Nesting %u iterations of %u depth each: %f s",
                   maxcycles, maxnesting, duration);
}

/*
 * Yield benchmark
 */

static void coroutine_fn yield_loop(void *opaque)
{
    unsigned int *counter = opaque;

    while ((*counter) > 0) {
        (*counter)--;
        qemu_coroutine_yield();
    }
}

static void perf_yield(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;
    Coroutine *coroutine = qemu_coroutine_create(yield_loop, &i);

    g_test_timer_start();
    while (i > 0) {
        qemu_coroutine_enter(coroutine);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Yield %u iterations: %f s", maxcycles, duration);
}

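/*
 * Baseline for the yield benchmark: the same number of plain (noinline)
 * function calls, so coroutine enter/yield overhead can be compared
 * against ordinary call overhead.
 */
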
static __attribute__((noinline)) void dummy(unsigned *i)
{
    (*i)--;
}

static void perf_baseline(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;

    g_test_timer_start();
    while (i > 0) {
        dummy(&i);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Function call %u iterations: %f s", maxcycles, duration);
}

static __attribute__((noinline)) void coroutine_fn perf_cost_func(void *opaque)
{
    qemu_coroutine_yield();
}

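/*
 * Each iteration creates a coroutine and enters it twice: once to run it
 * up to its yield, and once more so that it can terminate.
 */
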
static void perf_cost(void)
{
    const unsigned long maxcycles = 40000000;
    unsigned long i = 0;
    double duration;
    unsigned long ops;
    Coroutine *co;

    g_test_timer_start();
    while (i++ < maxcycles) {
        co = qemu_coroutine_create(perf_cost_func, &i);
        qemu_coroutine_enter(co);
        qemu_coroutine_enter(co);
    }
    duration = g_test_timer_elapsed();
    ops = (long)(maxcycles / (duration * 1000));

    g_test_message("Run operation %lu iterations %f s, %luK operations/s, "
                   "%luns per coroutine",
                   maxcycles,
                   duration, ops,
                   (unsigned long)(1000000000.0 * duration / maxcycles));
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);

    /* This test assumes there is a freelist and marks freed coroutine memory
     * with a sentinel value.  If there is no freelist this would legitimately
     * crash. */
    if (CONFIG_COROUTINE_POOL) {
        g_test_add_func("/basic/no-dangling-access", test_no_dangling_access);
    }

    g_test_add_func("/basic/lifecycle", test_lifecycle);
    g_test_add_func("/basic/yield", test_yield);
    g_test_add_func("/basic/nesting", test_nesting);
    g_test_add_func("/basic/self", test_self);
    g_test_add_func("/basic/entered", test_entered);
    g_test_add_func("/basic/in_coroutine", test_in_coroutine);
    g_test_add_func("/basic/order", test_order);
    g_test_add_func("/locking/co-mutex", test_co_mutex);
    g_test_add_func("/locking/co-mutex/lockable", test_co_mutex_lockable);
    g_test_add_func("/locking/co-rwlock/upgrade", test_co_rwlock_upgrade);
    g_test_add_func("/locking/co-rwlock/downgrade", test_co_rwlock_downgrade);
    if (g_test_perf()) {
        g_test_add_func("/perf/lifecycle", perf_lifecycle);
        g_test_add_func("/perf/nesting", perf_nesting);
        g_test_add_func("/perf/yield", perf_yield);
        g_test_add_func("/perf/function-call", perf_baseline);
        g_test_add_func("/perf/cost", perf_cost);
    }
    return g_test_run();
}