#include "block/aio.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"
+#include "qemu/error-report.h"
-AioContext *ctx;
+static AioContext *ctx;
typedef struct {
EventNotifier e;
bool auto_set;
} EventNotifierTestData;
-/* Wait until there are no more BHs or AIO requests */
-static void wait_for_aio(void)
-{
- while (aio_poll(ctx, true)) {
- /* Do nothing */
- }
-}
-
/* Wait until event notifier becomes inactive */
static void wait_until_inactive(EventNotifierTestData *data)
{
}
}
-#if !defined(_WIN32)
-
static void timer_test_cb(void *opaque)
{
TimerTestData *data = opaque;
}
}
-static void dummy_io_handler_read(void *opaque)
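+/* No-op read handler for the dummy event notifier that the timer tests
+ * register so that aio_poll has something to block on.
+ */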
+static void dummy_io_handler_read(EventNotifier *e)
{
}
-#endif /* !_WIN32 */
-
static void bh_delete_cb(void *opaque)
{
BHTestData *data = opaque;
g_assert(aio_poll(ctx, true));
g_assert_cmpint(data.n, ==, 2);
- wait_for_aio();
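+ /* Poll until all ten scheduled bottom halves have run, rather than relying
+ * on aio_poll()'s return value to detect completion.
+ */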
+ while (data.n < 10) {
+ aio_poll(ctx, true);
+ }
g_assert_cmpint(data.n, ==, 10);
g_assert(!aio_poll(ctx, false));
qemu_bh_schedule(data1.bh);
g_assert_cmpint(data1.n, ==, 0);
- wait_for_aio();
+ while (data1.n < data1.max) {
+ aio_poll(ctx, true);
+ }
g_assert_cmpint(data1.n, ==, data1.max);
g_assert(data1.bh == NULL);
g_assert_cmpint(data4.n, ==, 1);
g_assert(data1.bh == NULL);
- wait_for_aio();
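+ /* Poll until every bottom half in the deletion chain has reached its limit. */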
+ while (data1.n < data1.max ||
+ data2.n < data2.max ||
+ data3.n < data3.max ||
+ data4.n < data4.max) {
+ aio_poll(ctx, true);
+ }
g_assert_cmpint(data1.n, ==, data1.max);
g_assert_cmpint(data2.n, ==, data2.max);
g_assert_cmpint(data3.n, ==, data3.max);
qemu_bh_schedule(data.bh);
g_assert_cmpint(data.n, ==, 0);
- wait_for_aio();
+ g_assert(aio_poll(ctx, true));
g_assert_cmpint(data.n, ==, 1);
g_assert(!aio_poll(ctx, false));
event_notifier_cleanup(&data.e);
}
-#if !defined(_WIN32)
-
static void test_timer_schedule(void)
{
TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
.max = 2,
.clock_type = QEMU_CLOCK_VIRTUAL };
- int pipefd[2];
+ EventNotifier e;
/* aio_poll will not block to wait for timers to complete unless it has
* an fd to wait on. Fixing this breaks other tests. So create a dummy one.
*/
- g_assert(!qemu_pipe(pipefd));
- qemu_set_nonblock(pipefd[0]);
- qemu_set_nonblock(pipefd[1]);
-
- aio_set_fd_handler(ctx, pipefd[0],
- dummy_io_handler_read, NULL, NULL);
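+ /* Use an EventNotifier as the dummy source; unlike a pipe, it works on both
+ * POSIX and Windows hosts.
+ */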
+ event_notifier_init(&e, false);
+ aio_set_event_notifier(ctx, &e, dummy_io_handler_read);
aio_poll(ctx, false);
aio_timer_init(ctx, &data.timer, data.clock_type,
g_assert(!aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 2);
- aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL);
- close(pipefd[0]);
- close(pipefd[1]);
+ aio_set_event_notifier(ctx, &e, NULL);
+ event_notifier_cleanup(&e);
timer_del(&data.timer);
}
-#endif /* !_WIN32 */
-
/* Now the same tests, using the context as a GSource. They are
* very similar to the ones above, with g_main_context_iteration
* replacing aio_poll. However:
event_notifier_cleanup(&data.e);
}
-#if !defined(_WIN32)
-
static void test_source_timer_schedule(void)
{
TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
.max = 2,
.clock_type = QEMU_CLOCK_VIRTUAL };
- int pipefd[2];
+ EventNotifier e;
int64_t expiry;
/* aio_poll will not block to wait for timers to complete unless it has
* an fd to wait on. Fixing this breaks other tests. So create a dummy one.
*/
- g_assert(!qemu_pipe(pipefd));
- qemu_set_nonblock(pipefd[0]);
- qemu_set_nonblock(pipefd[1]);
-
- aio_set_fd_handler(ctx, pipefd[0],
- dummy_io_handler_read, NULL, NULL);
+ event_notifier_init(&e, false);
+ aio_set_event_notifier(ctx, &e, dummy_io_handler_read);
do {} while (g_main_context_iteration(NULL, false));
aio_timer_init(ctx, &data.timer, data.clock_type,
g_usleep(1 * G_USEC_PER_SEC);
g_assert_cmpint(data.n, ==, 0);
- g_assert(g_main_context_iteration(NULL, false));
+ g_assert(g_main_context_iteration(NULL, true));
g_assert_cmpint(data.n, ==, 1);
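+ /* The callback re-arms the timer, so the second expiration is expected one
+ * period (data.ns) later.
+ */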
+ expiry += data.ns;
- /* The comment above was not kidding when it said this wakes up itself */
- do {
- g_assert(g_main_context_iteration(NULL, true));
- } while (qemu_clock_get_ns(data.clock_type) <= expiry);
- g_usleep(1 * G_USEC_PER_SEC);
- g_main_context_iteration(NULL, false);
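+ /* Iterate the main loop (blocking) until the timer has fired a second time. */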
+ while (data.n < 2) {
+ g_main_context_iteration(NULL, true);
+ }
g_assert_cmpint(data.n, ==, 2);
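+ /* By now the clock must have advanced past the second expiry time. */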
+ g_assert(qemu_clock_get_ns(data.clock_type) > expiry);
- aio_set_fd_handler(ctx, pipefd[0], NULL, NULL, NULL);
- close(pipefd[0]);
- close(pipefd[1]);
+ aio_set_event_notifier(ctx, &e, NULL);
+ event_notifier_cleanup(&e);
timer_del(&data.timer);
}
-#endif /* !_WIN32 */
-
/* End of tests. */
int main(int argc, char **argv)
{
+ Error *local_error = NULL;
GSource *src;
init_clocks();
- ctx = aio_context_new();
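+ /* aio_context_new() reports failure through an Error, e.g. when its internal
+ * event notifier cannot be created.
+ */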
+ ctx = aio_context_new(&local_error);
+ if (!ctx) {
+ error_report("Failed to create AIO Context: '%s'",
+ error_get_pretty(local_error));
+ error_free(local_error);
+ exit(1);
+ }
src = aio_get_g_source(ctx);
g_source_attach(src, NULL);
g_source_unref(src);
g_test_add_func("/aio/event/wait", test_wait_event_notifier);
g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
g_test_add_func("/aio/event/flush", test_flush_event_notifier);
-#if !defined(_WIN32)
g_test_add_func("/aio/timer/schedule", test_timer_schedule);
-#endif
g_test_add_func("/aio-gsource/notify", test_source_notify);
g_test_add_func("/aio-gsource/flush", test_source_flush);
g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
-#if !defined(_WIN32)
g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
-#endif
return g_test_run();
}