-#include <glib.h>
+#include "qemu/osdep.h"
#include "qemu-common.h"
-#include "qemu-aio.h"
-#include "thread-pool.h"
-#include "block.h"
-
+#include "block/aio.h"
+#include "block/thread-pool.h"
+#include "block/block.h"
+#include "qapi/error.h"
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+
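+/* All tests submit work to this context's thread pool and drive
+ * completions by polling the context directly. */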
+static AioContext *ctx;
+static ThreadPool *pool;
static int active;
typedef struct {
- BlockDriverAIOCB *aiocb;
+ BlockAIOCB *aiocb;
int n;
int ret;
} WorkerTestData;
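+/* n tracks progress: a worker bumps it from 0 to 1 (long_cb on to 2),
+ * and the cancel test CASes 0 to 3 to claim a job that never started. */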
static int worker_cb(void *opaque)
{
WorkerTestData *data = opaque;
- return __sync_fetch_and_add(&data->n, 1);
+ return atomic_fetch_inc(&data->n);
}
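+/* A deliberately slow worker, so that the cancel test can catch it
+ * while it is still running. */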
static int long_cb(void *opaque)
{
WorkerTestData *data = opaque;
- __sync_fetch_and_add(&data->n, 1);
+ atomic_inc(&data->n);
g_usleep(2000000);
- __sync_fetch_and_add(&data->n, 1);
+ atomic_inc(&data->n);
return 0;
}
static void done_cb(void *opaque, int ret)
{
WorkerTestData *data = opaque;
- g_assert_cmpint(data->ret, ==, -EINPROGRESS);
+ g_assert(data->ret == -EINPROGRESS || data->ret == -ECANCELED);
data->ret = ret;
data->aiocb = NULL;
active--;
}
-/* A non-blocking poll of the main AIO context (we cannot use aio_poll
- * because we do not know the AioContext).
- */
-static void qemu_aio_wait_nonblocking(void)
-{
- qemu_notify_event();
- qemu_aio_wait();
-}
-
-/* Wait until all aio and bh activity has finished */
-static void qemu_aio_wait_all(void)
-{
- while (qemu_aio_wait()) {
- /* Do nothing */
- }
-}
-
static void test_submit(void)
{
WorkerTestData data = { .n = 0 };
- thread_pool_submit(worker_cb, &data);
- qemu_aio_wait_all();
+ thread_pool_submit(pool, worker_cb, &data);
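+ /* Keep polling the context until the worker has run. */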
+ while (data.n == 0) {
+ aio_poll(ctx, true);
+ }
g_assert_cmpint(data.n, ==, 1);
}
static void test_submit_aio(void)
{
WorkerTestData data = { .n = 0, .ret = -EINPROGRESS };
- data.aiocb = thread_pool_submit_aio(worker_cb, &data, done_cb, &data);
+ data.aiocb = thread_pool_submit_aio(pool, worker_cb, &data,
+ done_cb, &data);
/* The callbacks are not called until after the first wait. */
active = 1;
g_assert_cmpint(data.ret, ==, -EINPROGRESS);
- qemu_aio_wait_all();
+ while (data.ret == -EINPROGRESS) {
+ aio_poll(ctx, true);
+ }
g_assert_cmpint(active, ==, 0);
g_assert_cmpint(data.n, ==, 1);
g_assert_cmpint(data.ret, ==, 0);
}
static void co_test_cb(void *opaque)
{
WorkerTestData *data = opaque;
active = 1;
data->n = 0;
data->ret = -EINPROGRESS;
- thread_pool_submit_co(worker_cb, data);
+ thread_pool_submit_co(pool, worker_cb, data);
/* The test continues in test_submit_co, after qemu_coroutine_enter... */
g_assert_cmpint(data->n, ==, 1);
data->ret = 0;
active--;
- /* The test continues in test_submit_co, after qemu_aio_wait_all... */
+ /* The test continues in test_submit_co, after aio_poll... */
}
static void test_submit_co(void)
{
WorkerTestData data;
Coroutine *co = qemu_coroutine_create(co_test_cb, &data);
qemu_coroutine_enter(co);
/* Back here after the first yield. */
g_assert_cmpint(active, ==, 1);
g_assert_cmpint(data.ret, ==, -EINPROGRESS);
- /* qemu_aio_wait_all will execute the rest of the coroutine. */
+ /* aio_poll will execute the rest of the coroutine. */
- qemu_aio_wait_all();
+ while (data.ret == -EINPROGRESS) {
+ aio_poll(ctx, true);
+ }
/* Back here after the coroutine has finished. */
g_assert_cmpint(data.ret, ==, 0);
}
static void test_submit_many(void)
{
WorkerTestData data[100];
int i;
/* Start more work items than there will be threads. */
for (i = 0; i < 100; i++) {
data[i].n = 0;
data[i].ret = -EINPROGRESS;
- thread_pool_submit_aio(worker_cb, &data[i], done_cb, &data[i]);
+ thread_pool_submit_aio(pool, worker_cb, &data[i], done_cb, &data[i]);
}
active = 100;
while (active > 0) {
- qemu_aio_wait();
+ aio_poll(ctx, true);
}
for (i = 0; i < 100; i++) {
g_assert_cmpint(data[i].n, ==, 1);
}
}
-static void test_cancel(void)
+static void do_test_cancel(bool sync)
{
WorkerTestData data[100];
int num_canceled;
int i;
for (i = 0; i < 100; i++) {
data[i].n = 0;
data[i].ret = -EINPROGRESS;
- data[i].aiocb = thread_pool_submit_aio(long_cb, &data[i],
+ data[i].aiocb = thread_pool_submit_aio(pool, long_cb, &data[i],
done_cb, &data[i]);
}
/* Starting the threads may be left to a bottom half. Let it
* run, but do not waste too much time...
*/
active = 100;
- qemu_aio_wait_nonblocking();
+ aio_notify(ctx);
+ aio_poll(ctx, false);
/* Wait some time for the threads to start, with some sanity
* testing on the behavior of the scheduler...
*/
g_assert_cmpint(active, ==, 100);
g_usleep(1000000);
g_assert_cmpint(active, >, 50);
/* Cancel the jobs that haven't been started yet. */
num_canceled = 0;
for (i = 0; i < 100; i++) {
- if (__sync_val_compare_and_swap(&data[i].n, 0, 3) == 0) {
+ if (atomic_cmpxchg(&data[i].n, 0, 3) == 0) {
data[i].ret = -ECANCELED;
- bdrv_aio_cancel(data[i].aiocb);
- active--;
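+ /* bdrv_aio_cancel waits here for done_cb to run; the async variant
+ * only requests cancellation, and done_cb runs from a later aio_poll. */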
+ if (sync) {
+ bdrv_aio_cancel(data[i].aiocb);
+ } else {
+ bdrv_aio_cancel_async(data[i].aiocb);
+ }
num_canceled++;
}
}
g_assert_cmpint(active, >, 0);
g_assert_cmpint(num_canceled, <, 100);
- /* Canceling the others will be a blocking operation. */
for (i = 0; i < 100; i++) {
- if (data[i].n != 3) {
- bdrv_aio_cancel(data[i].aiocb);
+ if (data[i].aiocb && data[i].n != 3) {
+ if (sync) {
+ /* Canceling the others will be a blocking operation. */
+ bdrv_aio_cancel(data[i].aiocb);
+ } else {
+ bdrv_aio_cancel_async(data[i].aiocb);
+ }
}
}
/* Finish execution and execute any remaining callbacks. */
- qemu_aio_wait_all();
+ while (active > 0) {
+ aio_poll(ctx, true);
+ }
g_assert_cmpint(active, ==, 0);
for (i = 0; i < 100; i++) {
if (data[i].n == 3) {
g_assert_cmpint(data[i].ret, ==, -ECANCELED);
- g_assert(data[i].aiocb != NULL);
+ g_assert(data[i].aiocb == NULL);
} else {
g_assert_cmpint(data[i].n, ==, 2);
- g_assert_cmpint(data[i].ret, ==, 0);
+ g_assert(data[i].ret == 0 || data[i].ret == -ECANCELED);
g_assert(data[i].aiocb == NULL);
}
}
}
+static void test_cancel(void)
+{
+ do_test_cancel(true);
+}
+
+static void test_cancel_async(void)
+{
+ do_test_cancel(false);
+}
+
int main(int argc, char **argv)
{
- /* These should be removed once each AioContext has its thread pool.
- * The test should create its own AioContext.
- */
- qemu_init_main_loop();
- bdrv_init();
+ int ret;
+ Error *local_error = NULL;
+
+ init_clocks();
+
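+ /* A private AioContext avoids any dependency on the global main loop. */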
+ ctx = aio_context_new(&local_error);
+ if (!ctx) {
+ error_reportf_err(local_error, "Failed to create AIO Context: ");
+ exit(1);
+ }
+ pool = aio_get_thread_pool(ctx);
g_test_init(&argc, &argv, NULL);
g_test_add_func("/thread-pool/submit", test_submit);
g_test_add_func("/thread-pool/submit-co", test_submit_co);
g_test_add_func("/thread-pool/submit-many", test_submit_many);
g_test_add_func("/thread-pool/cancel", test_cancel);
- return g_test_run();
+ g_test_add_func("/thread-pool/cancel-async", test_cancel_async);
+
+ ret = g_test_run();
+
+ aio_context_unref(ctx);
+ return ret;
}