#include <glib.h>
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/block.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"

static AioContext *ctx;
static ThreadPool *pool;
static int active;

typedef struct {
    BlockDriverAIOCB *aiocb;
    int n;
    int ret;
} WorkerTestData;

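/* Worker functions run in the pool threads.  worker_cb bumps the per-item
 * counter once; long_cb sleeps for two seconds between two increments so the
 * cancellation tests can catch jobs while they are still running.
 */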
static int worker_cb(void *opaque)
{
    WorkerTestData *data = opaque;
    return atomic_fetch_inc(&data->n);
}

static int long_cb(void *opaque)
{
    WorkerTestData *data = opaque;
    atomic_inc(&data->n);
    g_usleep(2000000);
    atomic_inc(&data->n);
    return 0;
}

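/* Completion callback, run from aio_poll() in the main context once a work
 * item has finished or has been cancelled.
 */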
static void done_cb(void *opaque, int ret)
{
    WorkerTestData *data = opaque;
    g_assert(data->ret == -EINPROGRESS || data->ret == -ECANCELED);
    data->ret = ret;
    data->aiocb = NULL;

    /* Callbacks are serialized, so no need to use atomic ops. */
    active--;
}

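/* The tests below submit work items and then spin on aio_poll(ctx, true)
 * until the worker (and, where used, the completion callback) has run.
 */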
static void test_submit(void)
{
    WorkerTestData data = { .n = 0 };
    thread_pool_submit(pool, worker_cb, &data);
    while (data.n == 0) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data.n, ==, 1);
}

static void test_submit_aio(void)
{
    WorkerTestData data = { .n = 0, .ret = -EINPROGRESS };
    data.aiocb = thread_pool_submit_aio(pool, worker_cb, &data,
                                        done_cb, &data);

    /* The callbacks are not called until after the first wait. */
    active = 1;
    g_assert_cmpint(data.ret, ==, -EINPROGRESS);
    while (data.ret == -EINPROGRESS) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(active, ==, 0);
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.ret, ==, 0);
}

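/* Coroutine variant: thread_pool_submit_co() yields until the worker has
 * finished, so co_test_cb runs in two halves around that call, as the
 * comments inside it and in test_submit_co point out.
 */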
static void co_test_cb(void *opaque)
{
    WorkerTestData *data = opaque;

    active = 1;
    data->n = 0;
    data->ret = -EINPROGRESS;
    thread_pool_submit_co(pool, worker_cb, data);

    /* The test continues in test_submit_co, after qemu_coroutine_enter... */

    g_assert_cmpint(data->n, ==, 1);
    data->ret = 0;
    active--;

    /* The test continues in test_submit_co, after aio_poll... */
}

static void test_submit_co(void)
{
    WorkerTestData data;
    Coroutine *co = qemu_coroutine_create(co_test_cb);

    qemu_coroutine_enter(co, &data);

    /* Back here once the worker has started. */

    g_assert_cmpint(active, ==, 1);
    g_assert_cmpint(data.ret, ==, -EINPROGRESS);

    /* aio_poll will execute the rest of the coroutine. */

    while (data.ret == -EINPROGRESS) {
        aio_poll(ctx, true);
    }

    /* Back here after the coroutine has finished. */

    g_assert_cmpint(active, ==, 0);
    g_assert_cmpint(data.ret, ==, 0);
}

static void test_submit_many(void)
{
    WorkerTestData data[100];
    int i;

    /* Start more work items than there will be threads. */
    for (i = 0; i < 100; i++) {
        data[i].n = 0;
        data[i].ret = -EINPROGRESS;
        thread_pool_submit_aio(pool, worker_cb, &data[i], done_cb, &data[i]);
    }

    active = 100;
    while (active > 0) {
        aio_poll(ctx, true);
    }
    for (i = 0; i < 100; i++) {
        g_assert_cmpint(data[i].n, ==, 1);
        g_assert_cmpint(data[i].ret, ==, 0);
    }
}

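/* Cancellation test.  Each item's n field doubles as a state marker: long_cb
 * takes it from 0 to 1 when it starts and to 2 when it finishes, while jobs
 * that never started are flagged by switching n from 0 to 3 with
 * atomic_cmpxchg() before cancelling them.
 */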
static void do_test_cancel(bool sync)
{
    WorkerTestData data[100];
    int num_canceled;
    int i;

    /* Start more work items than there will be threads, to ensure
     * the pool is full.
     */
    test_submit_many();

    /* Start long running jobs, to ensure we can cancel some. */
    for (i = 0; i < 100; i++) {
        data[i].n = 0;
        data[i].ret = -EINPROGRESS;
        data[i].aiocb = thread_pool_submit_aio(pool, long_cb, &data[i],
                                               done_cb, &data[i]);
    }

    /* Starting the threads may be left to a bottom half.  Let it
     * run, but do not waste too much time...
     */
    active = 100;
    aio_notify(ctx);
    aio_poll(ctx, false);

    /* Wait some time for the threads to start, with some sanity
     * testing on the behavior of the scheduler...
     */
    g_assert_cmpint(active, ==, 100);
    g_usleep(1000000);
    g_assert_cmpint(active, >, 50);

    /* Cancel the jobs that haven't been started yet. */
    num_canceled = 0;
    for (i = 0; i < 100; i++) {
        if (atomic_cmpxchg(&data[i].n, 0, 3) == 0) {
            data[i].ret = -ECANCELED;
            if (sync) {
                bdrv_aio_cancel(data[i].aiocb);
            } else {
                bdrv_aio_cancel_async(data[i].aiocb);
            }
            num_canceled++;
        }
    }
    g_assert_cmpint(active, >, 0);
    g_assert_cmpint(num_canceled, <, 100);

    for (i = 0; i < 100; i++) {
        if (data[i].aiocb && data[i].n != 3) {
            if (sync) {
                /* Canceling the others will be a blocking operation. */
                bdrv_aio_cancel(data[i].aiocb);
            } else {
                bdrv_aio_cancel_async(data[i].aiocb);
            }
        }
    }

    /* Finish execution and execute any remaining callbacks. */
    while (active > 0) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(active, ==, 0);
    for (i = 0; i < 100; i++) {
        if (data[i].n == 3) {
            g_assert_cmpint(data[i].ret, ==, -ECANCELED);
            g_assert(data[i].aiocb == NULL);
        } else {
            g_assert_cmpint(data[i].n, ==, 2);
            g_assert(data[i].ret == 0 || data[i].ret == -ECANCELED);
            g_assert(data[i].aiocb == NULL);
        }
    }
}

static void test_cancel(void)
{
    do_test_cancel(true);
}

static void test_cancel_async(void)
{
    do_test_cancel(false);
}

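/* main() creates an AioContext, uses its thread pool for all the tests and
 * registers the test cases with the GLib test framework.
 */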
int main(int argc, char **argv)
{
    int ret;
    Error *local_error = NULL;

    init_clocks();

    ctx = aio_context_new(&local_error);
    if (!ctx) {
        error_report("Failed to create AIO Context: '%s'",
                     error_get_pretty(local_error));
        error_free(local_error);
        exit(1);
    }
    pool = aio_get_thread_pool(ctx);

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/thread-pool/submit", test_submit);
    g_test_add_func("/thread-pool/submit-aio", test_submit_aio);
    g_test_add_func("/thread-pool/submit-co", test_submit_co);
    g_test_add_func("/thread-pool/submit-many", test_submit_many);
    g_test_add_func("/thread-pool/cancel", test_cancel);
    g_test_add_func("/thread-pool/cancel-async", test_cancel_async);

    ret = g_test_run();

    aio_context_unref(ctx);
    return ret;
}