X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/f7ad538e1ea130c8b6f3abb06ad6c856242c799e..7fe34ca9c2e99560dc65395d599a6920624b127d:/tests/test-aio.c

diff --git a/tests/test-aio.c b/tests/test-aio.c
index 07a1f61f87..217e33772e 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -13,8 +13,10 @@
 #include <glib.h>
 #include "block/aio.h"
 #include "qemu/timer.h"
+#include "qemu/sockets.h"
+#include "qemu/error-report.h"
 
-AioContext *ctx;
+static AioContext *ctx;
 
 typedef struct {
     EventNotifier e;
@@ -23,14 +25,6 @@ typedef struct {
     bool auto_set;
 } EventNotifierTestData;
 
-/* Wait until there are no more BHs or AIO requests */
-static void wait_for_aio(void)
-{
-    while (aio_poll(ctx, true)) {
-        /* Do nothing */
-    }
-}
-
 /* Wait until event notifier becomes inactive */
 static void wait_until_inactive(EventNotifierTestData *data)
 {
@@ -73,7 +67,7 @@ static void timer_test_cb(void *opaque)
     }
 }
 
-static void dummy_io_handler_read(void *opaque)
+static void dummy_io_handler_read(EventNotifier *e)
 {
 }
 
@@ -103,12 +97,62 @@ static void event_ready_cb(EventNotifier *e)
 
 /* Tests using aio_*.  */
 
-static void test_notify(void)
+typedef struct {
+    QemuMutex start_lock;
+    bool thread_acquired;
+} AcquireTestData;
+
+static void *test_acquire_thread(void *opaque)
 {
-    g_assert(!aio_poll(ctx, false));
-    aio_notify(ctx);
+    AcquireTestData *data = opaque;
+
+    /* Wait for other thread to let us start */
+    qemu_mutex_lock(&data->start_lock);
+    qemu_mutex_unlock(&data->start_lock);
+
+    aio_context_acquire(ctx);
+    aio_context_release(ctx);
+
+    data->thread_acquired = true; /* success, we got here */
+
+    return NULL;
+}
+
+static void dummy_notifier_read(EventNotifier *unused)
+{
+    g_assert(false); /* should never be invoked */
+}
+
+static void test_acquire(void)
+{
+    QemuThread thread;
+    EventNotifier notifier;
+    AcquireTestData data;
+
+    /* Dummy event notifier ensures aio_poll() will block */
+    event_notifier_init(&notifier, false);
+    aio_set_event_notifier(ctx, &notifier, dummy_notifier_read);
+    g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */
+
+    qemu_mutex_init(&data.start_lock);
+    qemu_mutex_lock(&data.start_lock);
+    data.thread_acquired = false;
+
+    qemu_thread_create(&thread, "test_acquire_thread",
+                       test_acquire_thread,
+                       &data, QEMU_THREAD_JOINABLE);
+
+    /* Block in aio_poll(), let other thread kick us and acquire context */
+    aio_context_acquire(ctx);
+    qemu_mutex_unlock(&data.start_lock); /* let the thread run */
     g_assert(!aio_poll(ctx, true));
-    g_assert(!aio_poll(ctx, false));
+    aio_context_release(ctx);
+
+    qemu_thread_join(&thread);
+    aio_set_event_notifier(ctx, &notifier, NULL);
+    event_notifier_cleanup(&notifier);
+
+    g_assert(data.thread_acquired);
 }
 
 static void test_bh_schedule(void)
@@ -141,7 +185,9 @@ static void test_bh_schedule10(void)
     g_assert(aio_poll(ctx, true));
     g_assert_cmpint(data.n, ==, 2);
 
-    wait_for_aio();
+    while (data.n < 10) {
+        aio_poll(ctx, true);
+    }
     g_assert_cmpint(data.n, ==, 10);
 
     g_assert(!aio_poll(ctx, false));
@@ -189,12 +235,13 @@ static void test_bh_delete_from_cb(void)
     qemu_bh_schedule(data1.bh);
     g_assert_cmpint(data1.n, ==, 0);
 
-    wait_for_aio();
+    while (data1.n < data1.max) {
+        aio_poll(ctx, true);
+    }
     g_assert_cmpint(data1.n, ==, data1.max);
     g_assert(data1.bh == NULL);
 
     g_assert(!aio_poll(ctx, false));
-    g_assert(!aio_poll(ctx, true));
 }
 
 static void test_bh_delete_from_cb_many(void)
@@ -225,7 +272,12 @@ static void test_bh_delete_from_cb_many(void)
     g_assert_cmpint(data4.n, ==, 1);
     g_assert(data1.bh == NULL);
 
-    wait_for_aio();
+    while (data1.n < data1.max ||
+           data2.n < data2.max ||
+           data3.n < data3.max ||
+           data4.n < data4.max) {
+        aio_poll(ctx, true);
+    }
     g_assert_cmpint(data1.n, ==, data1.max);
     g_assert_cmpint(data2.n, ==, data2.max);
     g_assert_cmpint(data3.n, ==, data3.max);
@@ -244,7 +296,7 @@ static void test_bh_flush(void)
     qemu_bh_schedule(data.bh);
     g_assert_cmpint(data.n, ==, 0);
 
-    wait_for_aio();
+    g_assert(aio_poll(ctx, true));
     g_assert_cmpint(data.n, ==, 1);
 
     g_assert(!aio_poll(ctx, false));
@@ -271,7 +323,7 @@ static void test_wait_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(!aio_poll(ctx, false));
+    while (aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
 
@@ -296,7 +348,7 @@ static void test_flush_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(!aio_poll(ctx, false));
+    while (aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
 
@@ -370,14 +422,13 @@ static void test_timer_schedule(void)
     TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL, .max = 2,
                            .clock_type = QEMU_CLOCK_VIRTUAL };
-    int pipefd[2];
+    EventNotifier e;
 
     /* aio_poll will not block to wait for timers to complete unless it has
      * an fd to wait on. Fixing this breaks other tests. So create a dummy
      * one.
      */
-    g_assert(!pipe2(pipefd, O_NONBLOCK));
-    aio_set_fd_handler(ctx, pipefd[0],
-                       dummy_io_handler_read, NULL, NULL);
+    event_notifier_init(&e, false);
+    aio_set_event_notifier(ctx, &e, dummy_io_handler_read);
     aio_poll(ctx, false);
 
     aio_timer_init(ctx, &data.timer, data.clock_type,
@@ -396,7 +447,7 @@ static void test_timer_schedule(void)
     g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 0);
 
-    sleep(1);
+    g_usleep(1 * G_USEC_PER_SEC);
     g_assert_cmpint(data.n, ==, 0);
 
     g_assert(aio_poll(ctx, false));
@@ -416,9 +467,8 @@ static void test_timer_schedule(void)
     g_assert(!aio_poll(ctx, false));
     g_assert_cmpint(data.n, ==, 2);
 
-    aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL);
-    close(pipefd[0]);
-    close(pipefd[1]);
+    aio_set_event_notifier(ctx, &e, NULL);
+    event_notifier_cleanup(&e);
 
     timer_del(&data.timer);
 }
@@ -436,14 +486,6 @@ static void test_timer_schedule(void)
  *   works well, and that's what I am using.
  */
 
-static void test_source_notify(void)
-{
-    while (g_main_context_iteration(NULL, false));
-    aio_notify(ctx);
-    g_assert(g_main_context_iteration(NULL, true));
-    g_assert(!g_main_context_iteration(NULL, false));
-}
-
 static void test_source_flush(void)
 {
     g_assert(!g_main_context_iteration(NULL, false));
@@ -611,7 +653,7 @@ static void test_source_wait_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 1 };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(g_main_context_iteration(NULL, false));
+    while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 1);
 
@@ -636,7 +678,7 @@ static void test_source_flush_event_notifier(void)
     EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
     event_notifier_init(&data.e, false);
     aio_set_event_notifier(ctx, &data.e, event_ready_cb);
-    g_assert(g_main_context_iteration(NULL, false));
+    while (g_main_context_iteration(NULL, false));
     g_assert_cmpint(data.n, ==, 0);
     g_assert_cmpint(data.active, ==, 10);
 
@@ -710,15 +752,14 @@ static void test_source_timer_schedule(void)
     TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL, .max = 2,
                            .clock_type = QEMU_CLOCK_VIRTUAL };
-    int pipefd[2];
+    EventNotifier e;
     int64_t expiry;
 
     /* aio_poll will not block to wait for timers to complete unless it has
     * an fd to wait on. Fixing this breaks other tests. So create a dummy
      * one.
      */
-    g_assert(!pipe2(pipefd, O_NONBLOCK));
-    aio_set_fd_handler(ctx, pipefd[0],
-                       dummy_io_handler_read, NULL, NULL);
+    event_notifier_init(&e, false);
+    aio_set_event_notifier(ctx, &e, dummy_io_handler_read);
     do {} while (g_main_context_iteration(NULL, false));
 
     aio_timer_init(ctx, &data.timer, data.clock_type,
@@ -729,24 +770,22 @@ static void test_source_timer_schedule(void)
 
     g_assert_cmpint(data.n, ==, 0);
 
-    sleep(1);
+    g_usleep(1 * G_USEC_PER_SEC);
     g_assert_cmpint(data.n, ==, 0);
 
-    g_assert(g_main_context_iteration(NULL, false));
+    g_assert(g_main_context_iteration(NULL, true));
     g_assert_cmpint(data.n, ==, 1);
+    expiry += data.ns;
 
-    /* The comment above was not kidding when it said this wakes up itself */
-    do {
-        g_assert(g_main_context_iteration(NULL, true));
-    } while (qemu_clock_get_ns(data.clock_type) <= expiry);
-    sleep(1);
-    g_main_context_iteration(NULL, false);
+    while (data.n < 2) {
+        g_main_context_iteration(NULL, true);
+    }
 
     g_assert_cmpint(data.n, ==, 2);
+    g_assert(qemu_clock_get_ns(data.clock_type) > expiry);
 
-    aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL);
-    close(pipefd[0]);
-    close(pipefd[1]);
+    aio_set_event_notifier(ctx, &e, NULL);
+    event_notifier_cleanup(&e);
 
     timer_del(&data.timer);
 }
@@ -756,11 +795,18 @@ static void test_source_timer_schedule(void)
 
 int main(int argc, char **argv)
 {
+    Error *local_error = NULL;
     GSource *src;
 
     init_clocks();
 
-    ctx = aio_context_new();
+    ctx = aio_context_new(&local_error);
+    if (!ctx) {
+        error_report("Failed to create AIO Context: '%s'",
+                     error_get_pretty(local_error));
+        error_free(local_error);
+        exit(1);
+    }
     src = aio_get_g_source(ctx);
     g_source_attach(src, NULL);
     g_source_unref(src);
@@ -768,7 +814,7 @@ int main(int argc, char **argv)
     while (g_main_context_iteration(NULL, false));
 
     g_test_init(&argc, &argv, NULL);
-    g_test_add_func("/aio/notify",                  test_notify);
+    g_test_add_func("/aio/acquire",                 test_acquire);
    g_test_add_func("/aio/bh/schedule",             test_bh_schedule);
     g_test_add_func("/aio/bh/schedule10",           test_bh_schedule10);
     g_test_add_func("/aio/bh/cancel",               test_bh_cancel);
@@ -782,7 +828,6 @@ int main(int argc, char **argv)
     g_test_add_func("/aio/event/flush",             test_flush_event_notifier);
     g_test_add_func("/aio/timer/schedule",          test_timer_schedule);
 
-    g_test_add_func("/aio-gsource/notify",          test_source_notify);
     g_test_add_func("/aio-gsource/flush",           test_source_flush);
     g_test_add_func("/aio-gsource/bh/schedule",     test_source_bh_schedule);
     g_test_add_func("/aio-gsource/bh/schedule10",   test_source_bh_schedule10);
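
A note on the start-lock idiom used by the new test_acquire() above: the main thread keeps data.start_lock locked as a gate so the worker cannot run early, takes the AioContext, opens the gate, and only then blocks in aio_poll(ctx, true); the test passes only if the worker managed to acquire the context while the main thread was still inside aio_poll(). The sketch below shows just that gating handshake (not the aio_poll() part) with plain pthreads so it compiles outside QEMU; ctx_lock and acquire_thread are invented stand-ins for the AioContext lock and QEMU's thread helpers, and the spot where the real test blocks in aio_poll() is only marked by a comment.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Stand-in for the AioContext lock that aio_context_acquire()/release()
 * would take and drop in the real test. */
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

typedef struct {
    pthread_mutex_t start_lock;   /* held by main() until the worker may run */
    bool thread_acquired;
} AcquireTestData;

static void *acquire_thread(void *opaque)
{
    AcquireTestData *data = opaque;

    /* Block here until main() opens the start gate */
    pthread_mutex_lock(&data->start_lock);
    pthread_mutex_unlock(&data->start_lock);

    /* Acquire and release the shared "context" */
    pthread_mutex_lock(&ctx_lock);
    pthread_mutex_unlock(&ctx_lock);

    data->thread_acquired = true;
    return NULL;
}

int main(void)
{
    pthread_t thread;
    AcquireTestData data = { .thread_acquired = false };

    pthread_mutex_init(&data.start_lock, NULL);
    pthread_mutex_lock(&data.start_lock);       /* close the gate */
    pthread_create(&thread, NULL, acquire_thread, &data);

    pthread_mutex_lock(&ctx_lock);              /* own the context first */
    pthread_mutex_unlock(&data.start_lock);     /* open the gate */
    /* ... the real test blocks in aio_poll(ctx, true) here ... */
    pthread_mutex_unlock(&ctx_lock);            /* now the worker can get in */

    pthread_join(thread, NULL);
    assert(data.thread_acquired);
    return 0;
}

Built with cc -pthread, the final assert passes once the worker has made it through both mutexes, mirroring the g_assert(data.thread_acquired) at the end of the real test.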