/*
 * Tests for QEMU's thread pool implementation (block/thread-pool.c).
 */
74c856e9 PB |
1 | #include <glib.h> |
2 | #include "qemu-common.h" | |
737e150e PB |
3 | #include "block/aio.h" |
4 | #include "block/thread-pool.h" | |
5 | #include "block/block.h" | |
74c856e9 | 6 | |
/* Event loop context shared by all tests below. */
static AioContext *ctx;
/* Thread pool under test; obtained from (and owned by) ctx. */
static ThreadPool *pool;
/* Number of submitted work items whose completion callback has not yet
 * run.  Only modified from the event loop thread (see done_cb), so no
 * atomics are needed. */
static int active;

/* Per-work-item bookkeeping used by the tests. */
typedef struct {
    BlockDriverAIOCB *aiocb;  /* in-flight request; set to NULL by done_cb */
    int n;                    /* incremented by the worker function(s) */
    int ret;                  /* -EINPROGRESS until done_cb stores the result */
} WorkerTestData;
16 | ||
17 | static int worker_cb(void *opaque) | |
18 | { | |
19 | WorkerTestData *data = opaque; | |
20 | return __sync_fetch_and_add(&data->n, 1); | |
21 | } | |
22 | ||
23 | static int long_cb(void *opaque) | |
24 | { | |
25 | WorkerTestData *data = opaque; | |
26 | __sync_fetch_and_add(&data->n, 1); | |
27 | g_usleep(2000000); | |
28 | __sync_fetch_and_add(&data->n, 1); | |
29 | return 0; | |
30 | } | |
31 | ||
32 | static void done_cb(void *opaque, int ret) | |
33 | { | |
34 | WorkerTestData *data = opaque; | |
35 | g_assert_cmpint(data->ret, ==, -EINPROGRESS); | |
36 | data->ret = ret; | |
37 | data->aiocb = NULL; | |
38 | ||
39 | /* Callbacks are serialized, so no need to use atomic ops. */ | |
40 | active--; | |
41 | } | |
42 | ||
8a805c22 SH |
43 | /* Wait until all aio and bh activity has finished */ |
44 | static void qemu_aio_wait_all(void) | |
45 | { | |
c4d9d196 | 46 | while (aio_poll(ctx, true)) { |
8a805c22 SH |
47 | /* Do nothing */ |
48 | } | |
49 | } | |
50 | ||
74c856e9 PB |
51 | static void test_submit(void) |
52 | { | |
53 | WorkerTestData data = { .n = 0 }; | |
c4d9d196 | 54 | thread_pool_submit(pool, worker_cb, &data); |
8a805c22 | 55 | qemu_aio_wait_all(); |
74c856e9 PB |
56 | g_assert_cmpint(data.n, ==, 1); |
57 | } | |
58 | ||
59 | static void test_submit_aio(void) | |
60 | { | |
61 | WorkerTestData data = { .n = 0, .ret = -EINPROGRESS }; | |
c4d9d196 SH |
62 | data.aiocb = thread_pool_submit_aio(pool, worker_cb, &data, |
63 | done_cb, &data); | |
74c856e9 PB |
64 | |
65 | /* The callbacks are not called until after the first wait. */ | |
66 | active = 1; | |
67 | g_assert_cmpint(data.ret, ==, -EINPROGRESS); | |
8a805c22 | 68 | qemu_aio_wait_all(); |
74c856e9 PB |
69 | g_assert_cmpint(active, ==, 0); |
70 | g_assert_cmpint(data.n, ==, 1); | |
71 | g_assert_cmpint(data.ret, ==, 0); | |
72 | } | |
73 | ||
/* Body of the coroutine driven by test_submit_co.
 *
 * thread_pool_submit_co yields until the worker has finished, so this
 * function runs in two slices interleaved with test_submit_co: the
 * statements before the submit run on the first qemu_coroutine_enter,
 * the rest only after qemu_aio_wait_all resumes the coroutine.
 */
static void co_test_cb(void *opaque)
{
    WorkerTestData *data = opaque;

    active = 1;
    data->n = 0;
    data->ret = -EINPROGRESS;
    thread_pool_submit_co(pool, worker_cb, data);  /* yields to the caller */

    /* The test continues in test_submit_co, after qemu_coroutine_enter... */

    /* Resumed here only once the worker completed, hence n == 1. */
    g_assert_cmpint(data->n, ==, 1);
    data->ret = 0;
    active--;

    /* The test continues in test_submit_co, after qemu_aio_wait_all... */
}
91 | ||
/* Submit a work item from coroutine context and check that the
 * coroutine is suspended while the worker runs and is resumed (by the
 * event loop) when the worker completes.  See co_test_cb for the other
 * half of the control flow. */
static void test_submit_co(void)
{
    WorkerTestData data;
    Coroutine *co = qemu_coroutine_create(co_test_cb);

    qemu_coroutine_enter(co, &data);

    /* Back here once the worker has started. */

    g_assert_cmpint(active, ==, 1);
    g_assert_cmpint(data.ret, ==, -EINPROGRESS);

    /* qemu_aio_wait_all will execute the rest of the coroutine. */

    qemu_aio_wait_all();

    /* Back here after the coroutine has finished. */

    g_assert_cmpint(active, ==, 0);
    g_assert_cmpint(data.ret, ==, 0);
}
113 | ||
114 | static void test_submit_many(void) | |
115 | { | |
116 | WorkerTestData data[100]; | |
117 | int i; | |
118 | ||
119 | /* Start more work items than there will be threads. */ | |
120 | for (i = 0; i < 100; i++) { | |
121 | data[i].n = 0; | |
122 | data[i].ret = -EINPROGRESS; | |
c4d9d196 | 123 | thread_pool_submit_aio(pool, worker_cb, &data[i], done_cb, &data[i]); |
74c856e9 PB |
124 | } |
125 | ||
126 | active = 100; | |
127 | while (active > 0) { | |
c4d9d196 | 128 | aio_poll(ctx, true); |
74c856e9 PB |
129 | } |
130 | for (i = 0; i < 100; i++) { | |
131 | g_assert_cmpint(data[i].n, ==, 1); | |
132 | g_assert_cmpint(data[i].ret, ==, 0); | |
133 | } | |
134 | } | |
135 | ||
/* Cancellation test.  Item state is encoded in data[i].n:
 *   n == 0: long_cb has not started yet (cancelable),
 *   n == 3: we won the race and canceled the item before it ran,
 *   n == 2: long_cb ran to completion (two increments).
 */
static void test_cancel(void)
{
    WorkerTestData data[100];
    int num_canceled;
    int i;

    /* Start more work items than there will be threads, to ensure
     * the pool is full.
     */
    test_submit_many();

    /* Start long running jobs, to ensure we can cancel some. */
    for (i = 0; i < 100; i++) {
        data[i].n = 0;
        data[i].ret = -EINPROGRESS;
        data[i].aiocb = thread_pool_submit_aio(pool, long_cb, &data[i],
                                               done_cb, &data[i]);
    }

    /* Starting the threads may be left to a bottom half.  Let it
     * run, but do not waste too much time...
     */
    active = 100;
    aio_notify(ctx);
    aio_poll(ctx, false);

    /* Wait some time for the threads to start, with some sanity
     * testing on the behavior of the scheduler...
     */
    g_assert_cmpint(active, ==, 100);
    g_usleep(1000000);
    g_assert_cmpint(active, >, 50);

    /* Cancel the jobs that haven't been started yet. */
    num_canceled = 0;
    for (i = 0; i < 100; i++) {
        /* Winning the CAS (n was still 0) means long_cb has not run;
         * n == 3 marks the item as canceled-before-start. */
        if (__sync_val_compare_and_swap(&data[i].n, 0, 3) == 0) {
            data[i].ret = -ECANCELED;
            bdrv_aio_cancel(data[i].aiocb);
            active--;
            num_canceled++;
        }
    }
    /* Sanity: some jobs must be running and some must have run. */
    g_assert_cmpint(active, >, 0);
    g_assert_cmpint(num_canceled, <, 100);

    /* Canceling the others will be a blocking operation. */
    for (i = 0; i < 100; i++) {
        if (data[i].n != 3) {
            bdrv_aio_cancel(data[i].aiocb);
        }
    }

    /* Finish execution and execute any remaining callbacks. */
    qemu_aio_wait_all();
    g_assert_cmpint(active, ==, 0);
    for (i = 0; i < 100; i++) {
        if (data[i].n == 3) {
            /* Canceled before it ran: result set by the loop above. */
            g_assert_cmpint(data[i].ret, ==, -ECANCELED);
            g_assert(data[i].aiocb != NULL);
        } else {
            /* Ran to completion: long_cb incremented n twice. */
            g_assert_cmpint(data[i].n, ==, 2);
            g_assert_cmpint(data[i].ret, ==, 0);
            g_assert(data[i].aiocb == NULL);
        }
    }
}
203 | ||
204 | int main(int argc, char **argv) | |
205 | { | |
c4d9d196 SH |
206 | int ret; |
207 | ||
208 | ctx = aio_context_new(); | |
209 | pool = aio_get_thread_pool(ctx); | |
74c856e9 PB |
210 | |
211 | g_test_init(&argc, &argv, NULL); | |
212 | g_test_add_func("/thread-pool/submit", test_submit); | |
213 | g_test_add_func("/thread-pool/submit-aio", test_submit_aio); | |
214 | g_test_add_func("/thread-pool/submit-co", test_submit_co); | |
215 | g_test_add_func("/thread-pool/submit-many", test_submit_many); | |
216 | g_test_add_func("/thread-pool/cancel", test_cancel); | |
c4d9d196 SH |
217 | |
218 | ret = g_test_run(); | |
219 | ||
220 | aio_context_unref(ctx); | |
221 | return ret; | |
74c856e9 | 222 | } |