/*
 * Block node draining tests
 *
 * Copyright (c) 2017 Kevin Wolf <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "iothread.h"

static QemuEvent done_event;

typedef struct BDRVTestState {
    int drain_count;
    AioContext *bh_indirection_ctx;
    bool sleep_in_drain_begin;
} BDRVTestState;

static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count++;
    if (s->sleep_in_drain_begin) {
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
    }
}

static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count--;
}

static void bdrv_test_close(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    g_assert_cmpint(s->drain_count, >, 0);
}

static void co_reenter_bh(void *opaque)
{
    aio_co_wake(opaque);
}

static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov, int flags)
{
    BDRVTestState *s = bs->opaque;

    /* We want this request to stay until the polling loop in drain waits for
     * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
     * first and polls its result, too, but it shouldn't accidentally complete
     * this request yet. */
    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);

    if (s->bh_indirection_ctx) {
        aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
                                qemu_coroutine_self());
        qemu_coroutine_yield();
    }

    return 0;
}

static void bdrv_test_child_perm(BlockDriverState *bs, BdrvChild *c,
                                 const BdrvChildRole *role,
                                 BlockReopenQueue *reopen_queue,
                                 uint64_t perm, uint64_t shared,
                                 uint64_t *nperm, uint64_t *nshared)
{
    /* bdrv_format_default_perms() accepts only these two, so disguise
     * detach_by_driver_cb_role as one of them. */
    if (role != &child_file && role != &child_backing) {
        role = &child_file;
    }

    bdrv_format_default_perms(bs, c, role, reopen_queue, perm, shared,
                              nperm, nshared);
}

static int bdrv_test_change_backing_file(BlockDriverState *bs,
                                         const char *backing_file,
                                         const char *backing_fmt)
{
    return 0;
}

static BlockDriver bdrv_test = {
    .format_name = "test",
    .instance_size = sizeof(BDRVTestState),

    .bdrv_close = bdrv_test_close,
    .bdrv_co_preadv = bdrv_test_co_preadv,

    .bdrv_co_drain_begin = bdrv_test_co_drain_begin,
    .bdrv_co_drain_end = bdrv_test_co_drain_end,

    .bdrv_child_perm = bdrv_test_child_perm,

    .bdrv_change_backing_file = bdrv_test_change_backing_file,
};

static void aio_ret_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;
    *aio_ret = ret;
}

typedef struct CallInCoroutineData {
    void (*entry)(void);
    bool done;
} CallInCoroutineData;

static coroutine_fn void call_in_coroutine_entry(void *opaque)
{
    CallInCoroutineData *data = opaque;

    data->entry();
    data->done = true;
}

static void call_in_coroutine(void (*entry)(void))
{
    Coroutine *co;
    CallInCoroutineData data = {
        .entry = entry,
        .done = false,
    };

    co = qemu_coroutine_create(call_in_coroutine_entry, &data);
    qemu_coroutine_enter(co);
    while (!data.done) {
        aio_poll(qemu_get_aio_context(), true);
    }
}

enum drain_type {
    BDRV_DRAIN_ALL,
    BDRV_DRAIN,
    BDRV_SUBTREE_DRAIN,
    DRAIN_TYPE_MAX,
};

static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
{
    switch (drain_type) {
    case BDRV_DRAIN_ALL: bdrv_drain_all_begin(); break;
    case BDRV_DRAIN: bdrv_drained_begin(bs); break;
    case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_begin(bs); break;
    default: g_assert_not_reached();
    }
}

static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
{
    switch (drain_type) {
    case BDRV_DRAIN_ALL: bdrv_drain_all_end(); break;
    case BDRV_DRAIN: bdrv_drained_end(bs); break;
    case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_end(bs); break;
    default: g_assert_not_reached();
    }
}

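/* Variants of do_drain_begin/end for callers that do not hold the node's
 * AioContext lock: the lock is taken around the drain call for single-node
 * drains, while bdrv_drain_all_begin/end is called without taking it. */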
static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_acquire(bdrv_get_aio_context(bs));
    }
    do_drain_begin(drain_type, bs);
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_release(bdrv_get_aio_context(bs));
    }
}

static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_acquire(bdrv_get_aio_context(bs));
    }
    do_drain_end(drain_type, bs);
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_release(bdrv_get_aio_context(bs));
    }
}

static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;
    BDRVTestState *s, *backing_s;
    BlockAIOCB *acb;
    int aio_ret;

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs, backing, &error_abort);

    /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    /* Now do the same while a request is pending */
    aio_ret = -EINPROGRESS;
    acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(aio_ret, ==, 0);
    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}

static void test_drv_cb_drain_all(void)
{
    test_drv_cb_common(BDRV_DRAIN_ALL, true);
}

static void test_drv_cb_drain(void)
{
    test_drv_cb_common(BDRV_DRAIN, false);
}

static void test_drv_cb_drain_subtree(void)
{
    test_drv_cb_common(BDRV_SUBTREE_DRAIN, true);
}

static void test_drv_cb_co_drain_all(void)
{
    call_in_coroutine(test_drv_cb_drain_all);
}

static void test_drv_cb_co_drain(void)
{
    call_in_coroutine(test_drv_cb_drain);
}

static void test_drv_cb_co_drain_subtree(void)
{
    call_in_coroutine(test_drv_cb_drain_subtree);
}

static void test_quiesce_common(enum drain_type drain_type, bool recursive)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    bdrv_set_backing_hd(bs, backing, &error_abort);

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(bs->quiesce_counter, ==, 1);
    g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}

static void test_quiesce_drain_all(void)
{
    test_quiesce_common(BDRV_DRAIN_ALL, true);
}

static void test_quiesce_drain(void)
{
    test_quiesce_common(BDRV_DRAIN, false);
}

static void test_quiesce_drain_subtree(void)
{
    test_quiesce_common(BDRV_SUBTREE_DRAIN, true);
}

static void test_quiesce_co_drain_all(void)
{
    call_in_coroutine(test_quiesce_drain_all);
}

static void test_quiesce_co_drain(void)
{
    call_in_coroutine(test_quiesce_drain);
}

static void test_quiesce_co_drain_subtree(void)
{
    call_in_coroutine(test_quiesce_drain_subtree);
}

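/* Nest every combination of drain types on the same node and check that the
 * quiesce and drain counters add up for the node and its backing file. */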
static void test_nested(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;
    BDRVTestState *s, *backing_s;
    enum drain_type outer, inner;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs, backing, &error_abort);

    for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
        for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
            int backing_quiesce = (outer != BDRV_DRAIN) +
                                  (inner != BDRV_DRAIN);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);

            do_drain_begin(outer, bs);
            do_drain_begin(inner, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 2);
            g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
            g_assert_cmpint(s->drain_count, ==, 2);
            g_assert_cmpint(backing_s->drain_count, ==, backing_quiesce);

            do_drain_end(inner, bs);
            do_drain_end(outer, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);
        }
    }

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}

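/* Two parent nodes share the same backing file; a subtree drain on either
 * parent must also quiesce the other parent through the shared child, and the
 * counters must stack when both parents are drained at the same time. */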
static void test_multiparent(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b, *backing;
    BDRVTestState *a_s, *b_s, *backing_s;

    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs_a, backing, &error_abort);
    bdrv_set_backing_hd(bs_b, backing, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(backing->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, 1);

    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 2);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
    g_assert_cmpint(backing->quiesce_counter, ==, 2);
    g_assert_cmpint(a_s->drain_count, ==, 2);
    g_assert_cmpint(b_s->drain_count, ==, 2);
    g_assert_cmpint(backing_s->drain_count, ==, 2);

    do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(backing->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, 1);

    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs_a);
    bdrv_unref(bs_b);
    blk_unref(blk_a);
    blk_unref(blk_b);
}

static void test_graph_change_drain_subtree(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b, *backing;
    BDRVTestState *a_s, *b_s, *backing_s;

    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs_a, backing, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
    do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);

    bdrv_set_backing_hd(bs_b, backing, &error_abort);
    g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
    g_assert_cmpint(backing->quiesce_counter, ==, 5);
    g_assert_cmpint(a_s->drain_count, ==, 5);
    g_assert_cmpint(b_s->drain_count, ==, 5);
    g_assert_cmpint(backing_s->drain_count, ==, 5);

    bdrv_set_backing_hd(bs_b, NULL, &error_abort);
    g_assert_cmpint(bs_a->quiesce_counter, ==, 3);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
    g_assert_cmpint(backing->quiesce_counter, ==, 3);
    g_assert_cmpint(a_s->drain_count, ==, 3);
    g_assert_cmpint(b_s->drain_count, ==, 2);
    g_assert_cmpint(backing_s->drain_count, ==, 3);

    bdrv_set_backing_hd(bs_b, backing, &error_abort);
    g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
    g_assert_cmpint(backing->quiesce_counter, ==, 5);
    g_assert_cmpint(a_s->drain_count, ==, 5);
    g_assert_cmpint(b_s->drain_count, ==, 5);
    g_assert_cmpint(backing_s->drain_count, ==, 5);

    do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
    do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    bdrv_unref(backing);
    bdrv_unref(bs_a);
    bdrv_unref(bs_b);
    blk_unref(blk_a);
    blk_unref(blk_b);
}

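/* Check that nodes created or deleted while bdrv_drain_all() is in effect pick
 * up and keep the correct quiesce state. */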
static void test_graph_change_drain_all(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b;
    BDRVTestState *a_s, *b_s;

    /* Create node A with a BlockBackend */
    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);

    /* Call bdrv_drain_all_begin() */
    bdrv_drain_all_begin();

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);

    /* Create node B with a BlockBackend */
    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* Unref and finally delete node A */
    blk_unref(blk_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    bdrv_unref(bs_a);

    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* End the drained section */
    bdrv_drain_all_end();

    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);

    bdrv_unref(bs_b);
    blk_unref(blk_b);
}

struct test_iothread_data {
    BlockDriverState *bs;
    enum drain_type drain_type;
    int *aio_ret;
};

static void test_iothread_drain_entry(void *opaque)
{
    struct test_iothread_data *data = opaque;

    aio_context_acquire(bdrv_get_aio_context(data->bs));
    do_drain_begin(data->drain_type, data->bs);
    g_assert_cmpint(*data->aio_ret, ==, 0);
    do_drain_end(data->drain_type, data->bs);
    aio_context_release(bdrv_get_aio_context(data->bs));

    qemu_event_set(&done_event);
}

static void test_iothread_aio_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;
    *aio_ret = ret;
    qemu_event_set(&done_event);
}

static void test_iothread_main_thread_bh(void *opaque)
{
    struct test_iothread_data *data = opaque;

    /* Test that the AioContext is not yet locked in a random BH that is
     * executed during drain, otherwise this would deadlock. */
    aio_context_acquire(bdrv_get_aio_context(data->bs));
    bdrv_flush(data->bs);
    aio_context_release(bdrv_get_aio_context(data->bs));
}

/*
 * Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
 * The request involves a BH on iothread 2 before it can complete.
 *
 * @drain_thread = 0 means that do_drain_begin/end are called from the main
 * thread, @drain_thread = 1 means that they are called from iothread 1. Drain
 * for this BDS cannot be called from iothread 2 because only the main thread
 * may do cross-AioContext polling.
 */
static void test_iothread_common(enum drain_type drain_type, int drain_thread)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    BDRVTestState *s;
    BlockAIOCB *acb;
    int aio_ret;
    struct test_iothread_data data;

    IOThread *a = iothread_new();
    IOThread *b = iothread_new();
    AioContext *ctx_a = iothread_get_aio_context(a);
    AioContext *ctx_b = iothread_get_aio_context(b);

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    /* bdrv_drain_all() may only be called from the main loop thread */
    if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
        goto out;
    }

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);
    blk_set_disable_request_queuing(blk, true);

    blk_set_aio_context(blk, ctx_a, &error_abort);
    aio_context_acquire(ctx_a);

    s->bh_indirection_ctx = ctx_b;

    aio_ret = -EINPROGRESS;
    qemu_event_reset(&done_event);

    if (drain_thread == 0) {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
    } else {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    }
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    aio_context_release(ctx_a);

    data = (struct test_iothread_data) {
        .bs = bs,
        .drain_type = drain_type,
        .aio_ret = &aio_ret,
    };

    switch (drain_thread) {
    case 0:
        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_acquire(ctx_a);
        }

        aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);

        /* The request is running on the IOThread a. Draining its block device
         * will make sure that it has completed as far as the BDS is concerned,
         * but the drain in this thread can continue immediately after
         * bdrv_dec_in_flight() and aio_ret might be assigned only slightly
         * later. */
        do_drain_begin(drain_type, bs);
        g_assert_cmpint(bs->in_flight, ==, 0);

        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_release(ctx_a);
        }
        qemu_event_wait(&done_event);
        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_acquire(ctx_a);
        }

        g_assert_cmpint(aio_ret, ==, 0);
        do_drain_end(drain_type, bs);

        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_release(ctx_a);
        }
        break;
    case 1:
        aio_bh_schedule_oneshot(ctx_a, test_iothread_drain_entry, &data);
        qemu_event_wait(&done_event);
        break;
    default:
        g_assert_not_reached();
    }

    aio_context_acquire(ctx_a);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx_a);

    bdrv_unref(bs);
    blk_unref(blk);

out:
    iothread_join(a);
    iothread_join(b);
}

static void test_iothread_drain_all(void)
{
    test_iothread_common(BDRV_DRAIN_ALL, 0);
    test_iothread_common(BDRV_DRAIN_ALL, 1);
}

static void test_iothread_drain(void)
{
    test_iothread_common(BDRV_DRAIN, 0);
    test_iothread_common(BDRV_DRAIN, 1);
}

static void test_iothread_drain_subtree(void)
{
    test_iothread_common(BDRV_SUBTREE_DRAIN, 0);
    test_iothread_common(BDRV_SUBTREE_DRAIN, 1);
}

typedef struct TestBlockJob {
    BlockJob common;
    int run_ret;
    int prepare_ret;
    bool running;
    bool should_complete;
} TestBlockJob;

static int test_job_prepare(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    blk_flush(s->common.blk);
    return s->prepare_ret;
}

static void test_job_commit(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    blk_flush(s->common.blk);
}

static void test_job_abort(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    blk_flush(s->common.blk);
}

static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* We are running the actual job code past the pause point in
     * job_co_entry(). */
    s->running = true;

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that drain
         * has to wait for this activity to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    return s->run_ret;
}

static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}

BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size = sizeof(TestBlockJob),
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .drain = block_job_drain,
        .run = test_job_run,
        .complete = test_job_complete,
        .prepare = test_job_prepare,
        .commit = test_job_commit,
        .abort = test_job_abort,
    },
};

enum test_job_result {
    TEST_JOB_SUCCESS,
    TEST_JOB_FAIL_RUN,
    TEST_JOB_FAIL_PREPARE,
};

enum test_job_drain_node {
    TEST_JOB_DRAIN_SRC,
    TEST_JOB_DRAIN_SRC_CHILD,
    TEST_JOB_DRAIN_SRC_PARENT,
};

static void test_blockjob_common_drain_node(enum drain_type drain_type,
                                            bool use_iothread,
                                            enum test_job_result result,
                                            enum test_job_drain_node drain_node)
{
    BlockBackend *blk_src, *blk_target;
    BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
    BlockJob *job;
    TestBlockJob *tjob;
    IOThread *iothread = NULL;
    AioContext *ctx;
    int ret;

    src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
                               &error_abort);
    src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
                                       BDRV_O_RDWR, &error_abort);
    src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
                                       BDRV_O_RDWR, &error_abort);

    bdrv_set_backing_hd(src_overlay, src, &error_abort);
    bdrv_unref(src);
    bdrv_set_backing_hd(src, src_backing, &error_abort);
    bdrv_unref(src_backing);

    blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_src, src_overlay, &error_abort);

    switch (drain_node) {
    case TEST_JOB_DRAIN_SRC:
        drain_bs = src;
        break;
    case TEST_JOB_DRAIN_SRC_CHILD:
        drain_bs = src_backing;
        break;
    case TEST_JOB_DRAIN_SRC_PARENT:
        drain_bs = src_overlay;
        break;
    default:
        g_assert_not_reached();
    }

    if (use_iothread) {
        iothread = iothread_new();
        ctx = iothread_get_aio_context(iothread);
        blk_set_aio_context(blk_src, ctx, &error_abort);
    } else {
        ctx = qemu_get_aio_context();
    }

    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);
    blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_target, target, &error_abort);
    blk_set_allow_aio_context_change(blk_target, true);

    aio_context_acquire(ctx);
    tjob = block_job_create("job0", &test_job_driver, NULL, src,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job = &tjob->common;
    block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);

    switch (result) {
    case TEST_JOB_SUCCESS:
        break;
    case TEST_JOB_FAIL_RUN:
        tjob->run_ret = -EIO;
        break;
    case TEST_JOB_FAIL_PREPARE:
        tjob->prepare_ret = -EIO;
        break;
    }

    job_start(&job->job);
    aio_context_release(ctx);

    if (use_iothread) {
        /* job_co_entry() is run in the I/O thread, wait for the actual job
         * code to start (we don't want to catch the job in the pause point in
         * job_co_entry()). */
        while (!tjob->running) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    g_assert_cmpint(job->job.pause_count, ==, 0);
    g_assert_false(job->job.paused);
    g_assert_true(tjob->running);
    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */

    do_drain_begin_unlocked(drain_type, drain_bs);

    if (drain_type == BDRV_DRAIN_ALL) {
        /* bdrv_drain_all() drains both src and target */
        g_assert_cmpint(job->job.pause_count, ==, 2);
    } else {
        g_assert_cmpint(job->job.pause_count, ==, 1);
    }
    g_assert_true(job->job.paused);
    g_assert_false(job->job.busy); /* The job is paused */

    do_drain_end_unlocked(drain_type, drain_bs);

    if (use_iothread) {
        /* paused is reset in the I/O thread, wait for it */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    g_assert_cmpint(job->job.pause_count, ==, 0);
    g_assert_false(job->job.paused);
    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */

    do_drain_begin_unlocked(drain_type, target);

    if (drain_type == BDRV_DRAIN_ALL) {
        /* bdrv_drain_all() drains both src and target */
        g_assert_cmpint(job->job.pause_count, ==, 2);
    } else {
        g_assert_cmpint(job->job.pause_count, ==, 1);
    }
    g_assert_true(job->job.paused);
    g_assert_false(job->job.busy); /* The job is paused */

    do_drain_end_unlocked(drain_type, target);

    if (use_iothread) {
        /* paused is reset in the I/O thread, wait for it */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    g_assert_cmpint(job->job.pause_count, ==, 0);
    g_assert_false(job->job.paused);
    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */

    aio_context_acquire(ctx);
    ret = job_complete_sync(&job->job, &error_abort);
    g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

    if (use_iothread) {
        blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
        assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
    }
    aio_context_release(ctx);

    blk_unref(blk_src);
    blk_unref(blk_target);
    bdrv_unref(src_overlay);
    bdrv_unref(target);

    if (iothread) {
        iothread_join(iothread);
    }
}

static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
                                 enum test_job_result result)
{
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC);
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC_CHILD);
    if (drain_type == BDRV_SUBTREE_DRAIN) {
        test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                        TEST_JOB_DRAIN_SRC_PARENT);
    }
}

static void test_blockjob_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_error_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_SUBTREE_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_error_drain_subtree(void)
{
    test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_SUBTREE_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}

typedef struct BDRVTestTopState {
    BdrvChild *wait_child;
} BDRVTestTopState;

static void bdrv_test_top_close(BlockDriverState *bs)
{
    BdrvChild *c, *next_c;
    QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
        bdrv_unref_child(bs, c);
    }
}

static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs,
                                                uint64_t offset, uint64_t bytes,
                                                QEMUIOVector *qiov, int flags)
{
    BDRVTestTopState *tts = bs->opaque;
    return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
}

static BlockDriver bdrv_test_top_driver = {
    .format_name = "test_top_driver",
    .instance_size = sizeof(BDRVTestTopState),

    .bdrv_close = bdrv_test_top_close,
    .bdrv_co_preadv = bdrv_test_top_co_preadv,

    .bdrv_child_perm = bdrv_format_default_perms,
};

typedef struct TestCoDeleteByDrainData {
    BlockBackend *blk;
    bool detach_instead_of_delete;
    bool done;
} TestCoDeleteByDrainData;

static void coroutine_fn test_co_delete_by_drain(void *opaque)
{
    TestCoDeleteByDrainData *dbdd = opaque;
    BlockBackend *blk = dbdd->blk;
    BlockDriverState *bs = blk_bs(blk);
    BDRVTestTopState *tts = bs->opaque;
    void *buffer = g_malloc(65536);
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);

    /* Pretend some internal write operation from parent to child.
     * Important: We have to read from the child, not from the parent!
     * Draining works by first propagating it all up the tree to the
     * root and then waiting for drainage from root to the leaves
     * (protocol nodes). If we have a request waiting on the root,
     * everything will be drained before we go back down the tree, but
     * we do not want that. We want to be in the middle of draining
     * when the following request returns. */
    bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);

    g_assert_cmpint(bs->refcnt, ==, 1);

    if (!dbdd->detach_instead_of_delete) {
        blk_unref(blk);
    } else {
        BdrvChild *c, *next_c;
        QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
            bdrv_unref_child(bs, c);
        }
    }

    dbdd->done = true;
    g_free(buffer);
}

/**
 * Test what happens when some BDS has some children, you drain one of
 * them and this results in the BDS being deleted.
 *
 * If @detach_instead_of_delete is set, the BDS is not going to be
 * deleted but will only detach all of its children.
 */
static void do_test_delete_by_drain(bool detach_instead_of_delete,
                                    enum drain_type drain_type)
{
    BlockBackend *blk;
    BlockDriverState *bs, *child_bs, *null_bs;
    BDRVTestTopState *tts;
    TestCoDeleteByDrainData dbdd;
    Coroutine *co;

    bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR,
                              &error_abort);
    bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
    tts = bs->opaque;

    null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                        &error_abort);
    bdrv_attach_child(bs, null_bs, "null-child", &child_file, &error_abort);

    /* This child will be the one to pass requests through to, and
     * it will stall until a drain occurs */
    child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR,
                                    &error_abort);
    child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
    /* Takes our reference to child_bs */
    tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child", &child_file,
                                        &error_abort);

    /* This child is just there to be deleted
     * (for detach_instead_of_delete == true) */
    null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                        &error_abort);
    bdrv_attach_child(bs, null_bs, "null-child", &child_file, &error_abort);

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk, bs, &error_abort);

    /* Referenced by blk now */
    bdrv_unref(bs);

    g_assert_cmpint(bs->refcnt, ==, 1);
    g_assert_cmpint(child_bs->refcnt, ==, 1);
    g_assert_cmpint(null_bs->refcnt, ==, 1);


    dbdd = (TestCoDeleteByDrainData){
        .blk = blk,
        .detach_instead_of_delete = detach_instead_of_delete,
        .done = false,
    };
    co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd);
    qemu_coroutine_enter(co);

    /* Drain the child while the read operation is still pending.
     * This should result in the operation finishing and
     * test_co_delete_by_drain() resuming. Thus, @bs will be deleted
     * and the coroutine will exit while this drain operation is still
     * in progress. */
    switch (drain_type) {
    case BDRV_DRAIN:
        bdrv_ref(child_bs);
        bdrv_drain(child_bs);
        bdrv_unref(child_bs);
        break;
    case BDRV_SUBTREE_DRAIN:
        /* Would have to ref/unref bs here for !detach_instead_of_delete, but
         * then the whole test becomes pointless because the graph changes
         * don't occur during the drain any more. */
        assert(detach_instead_of_delete);
        bdrv_subtree_drained_begin(bs);
        bdrv_subtree_drained_end(bs);
        break;
    case BDRV_DRAIN_ALL:
        bdrv_drain_all_begin();
        bdrv_drain_all_end();
        break;
    default:
        g_assert_not_reached();
    }

    while (!dbdd.done) {
        aio_poll(qemu_get_aio_context(), true);
    }

    if (detach_instead_of_delete) {
        /* Here, the reference has not passed over to the coroutine,
         * so we have to delete the BB ourselves */
        blk_unref(blk);
    }
}

static void test_delete_by_drain(void)
{
    do_test_delete_by_drain(false, BDRV_DRAIN);
}

static void test_detach_by_drain_all(void)
{
    do_test_delete_by_drain(true, BDRV_DRAIN_ALL);
}

static void test_detach_by_drain(void)
{
    do_test_delete_by_drain(true, BDRV_DRAIN);
}

static void test_detach_by_drain_subtree(void)
{
    do_test_delete_by_drain(true, BDRV_SUBTREE_DRAIN);
}


struct detach_by_parent_data {
    BlockDriverState *parent_b;
    BdrvChild *child_b;
    BlockDriverState *c;
    BdrvChild *child_c;
    bool by_parent_cb;
};
static struct detach_by_parent_data detach_by_parent_data;

static void detach_indirect_bh(void *opaque)
{
    struct detach_by_parent_data *data = opaque;

    bdrv_unref_child(data->parent_b, data->child_b);

    bdrv_ref(data->c);
    data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
                                      &child_file, &error_abort);
}

static void detach_by_parent_aio_cb(void *opaque, int ret)
{
    struct detach_by_parent_data *data = &detach_by_parent_data;

    g_assert_cmpint(ret, ==, 0);
    if (data->by_parent_cb) {
        detach_indirect_bh(data);
    }
}

static void detach_by_driver_cb_drained_begin(BdrvChild *child)
{
    aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
                            detach_indirect_bh, &detach_by_parent_data);
    child_file.drained_begin(child);
}

static BdrvChildRole detach_by_driver_cb_role;

/*
 * Initial graph:
 *
 * PA     PB
 *    \ /   \
 *     A     B     C
 *
 * by_parent_cb == true: Test that parent callbacks don't poll
 *
 * PA has a pending write request whose callback changes the child nodes of
 * PB: It removes B and adds C instead. The subtree of PB is drained, which
 * will indirectly drain the write request, too.
 *
 * by_parent_cb == false: Test that bdrv_drain_invoke() doesn't poll
 *
 * PA's BdrvChildRole has a .drained_begin callback that schedules a BH
 * that does the same graph change. If bdrv_drain_invoke() calls it, the
 * state is messed up, but if it is only polled in the single
 * BDRV_POLL_WHILE() at the end of the drain, this should work fine.
 */
static void test_detach_indirect(bool by_parent_cb)
{
    BlockBackend *blk;
    BlockDriverState *parent_a, *parent_b, *a, *b, *c;
    BdrvChild *child_a, *child_b;
    BlockAIOCB *acb;

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    if (!by_parent_cb) {
        detach_by_driver_cb_role = child_file;
        detach_by_driver_cb_role.drained_begin =
            detach_by_driver_cb_drained_begin;
    }

    /* Create all involved nodes */
    parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR,
                                    &error_abort);
    parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0,
                                    &error_abort);

    a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort);
    b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort);
    c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort);

    /* blk is a BB for parent-a */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk, parent_a, &error_abort);
    bdrv_unref(parent_a);

    /* If we want to get bdrv_drain_invoke() to call aio_poll(), the driver
     * callback must not return immediately. */
    if (!by_parent_cb) {
        BDRVTestState *s = parent_a->opaque;
        s->sleep_in_drain_begin = true;
    }

    /* Set child relationships */
    bdrv_ref(b);
    bdrv_ref(a);
    child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_file, &error_abort);
    child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_backing, &error_abort);

    bdrv_ref(a);
    bdrv_attach_child(parent_a, a, "PA-A",
                      by_parent_cb ? &child_file : &detach_by_driver_cb_role,
                      &error_abort);

    g_assert_cmpint(parent_a->refcnt, ==, 1);
    g_assert_cmpint(parent_b->refcnt, ==, 1);
    g_assert_cmpint(a->refcnt, ==, 3);
    g_assert_cmpint(b->refcnt, ==, 2);
    g_assert_cmpint(c->refcnt, ==, 1);

    g_assert(QLIST_FIRST(&parent_b->children) == child_a);
    g_assert(QLIST_NEXT(child_a, next) == child_b);
    g_assert(QLIST_NEXT(child_b, next) == NULL);

    /* Start the evil write request */
    detach_by_parent_data = (struct detach_by_parent_data) {
        .parent_b = parent_b,
        .child_b = child_b,
        .c = c,
        .by_parent_cb = by_parent_cb,
    };
    acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL);
    g_assert(acb != NULL);

    /* Drain and check the expected result */
    bdrv_subtree_drained_begin(parent_b);

    g_assert(detach_by_parent_data.child_c != NULL);

    g_assert_cmpint(parent_a->refcnt, ==, 1);
    g_assert_cmpint(parent_b->refcnt, ==, 1);
    g_assert_cmpint(a->refcnt, ==, 3);
    g_assert_cmpint(b->refcnt, ==, 1);
    g_assert_cmpint(c->refcnt, ==, 2);

    g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c);
    g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a);
    g_assert(QLIST_NEXT(child_a, next) == NULL);

    g_assert_cmpint(parent_a->quiesce_counter, ==, 1);
    g_assert_cmpint(parent_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a->quiesce_counter, ==, 1);
    g_assert_cmpint(b->quiesce_counter, ==, 0);
    g_assert_cmpint(c->quiesce_counter, ==, 1);

    bdrv_subtree_drained_end(parent_b);

    bdrv_unref(parent_b);
    blk_unref(blk);

    g_assert_cmpint(a->refcnt, ==, 1);
    g_assert_cmpint(b->refcnt, ==, 1);
    g_assert_cmpint(c->refcnt, ==, 1);
    bdrv_unref(a);
    bdrv_unref(b);
    bdrv_unref(c);
}

static void test_detach_by_parent_cb(void)
{
    test_detach_indirect(true);
}

static void test_detach_by_driver_cb(void)
{
    test_detach_indirect(false);
}

b994c5bc KW |
1470 | static void test_append_to_drained(void) |
1471 | { | |
1472 | BlockBackend *blk; | |
1473 | BlockDriverState *base, *overlay; | |
1474 | BDRVTestState *base_s, *overlay_s; | |
1475 | ||
d861ab3a | 1476 | blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); |
b994c5bc KW |
1477 | base = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); |
1478 | base_s = base->opaque; | |
1479 | blk_insert_bs(blk, base, &error_abort); | |
1480 | ||
1481 | overlay = bdrv_new_open_driver(&bdrv_test, "overlay", BDRV_O_RDWR, | |
1482 | &error_abort); | |
1483 | overlay_s = overlay->opaque; | |
1484 | ||
1485 | do_drain_begin(BDRV_DRAIN, base); | |
1486 | g_assert_cmpint(base->quiesce_counter, ==, 1); | |
1487 | g_assert_cmpint(base_s->drain_count, ==, 1); | |
1488 | g_assert_cmpint(base->in_flight, ==, 0); | |
1489 | ||
1490 | /* Takes ownership of overlay, so we don't have to unref it later */ | |
1491 | bdrv_append(overlay, base, &error_abort); | |
1492 | g_assert_cmpint(base->in_flight, ==, 0); | |
1493 | g_assert_cmpint(overlay->in_flight, ==, 0); | |
1494 | ||
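/* The freshly appended overlay must inherit the drained state of the node it
 * was inserted above: */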
1495 | g_assert_cmpint(base->quiesce_counter, ==, 1); | |
1496 | g_assert_cmpint(base_s->drain_count, ==, 1); | |
1497 | g_assert_cmpint(overlay->quiesce_counter, ==, 1); | |
1498 | g_assert_cmpint(overlay_s->drain_count, ==, 1); | |
1499 | ||
1500 | do_drain_end(BDRV_DRAIN, base); | |
1501 | ||
1502 | g_assert_cmpint(base->quiesce_counter, ==, 0); | |
1503 | g_assert_cmpint(base_s->drain_count, ==, 0); | |
1504 | g_assert_cmpint(overlay->quiesce_counter, ==, 0); | |
1505 | g_assert_cmpint(overlay_s->drain_count, ==, 0); | |
1506 | ||
1507 | bdrv_unref(base); | |
1508 | blk_unref(blk); | |
1509 | } | |
1510 | ||
247d2737 KW |
1511 | static void test_set_aio_context(void) |
1512 | { | |
1513 | BlockDriverState *bs; | |
1514 | IOThread *a = iothread_new(); | |
1515 | IOThread *b = iothread_new(); | |
1516 | AioContext *ctx_a = iothread_get_aio_context(a); | |
1517 | AioContext *ctx_b = iothread_get_aio_context(b); | |
1518 | ||
1519 | bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR, | |
1520 | &error_abort); | |
1521 | ||
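/* The node is moved between AioContexts while drained; while it lives in an
 * IOThread's context, that context has to be acquired around any further
 * operation on the node. */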
1522 | bdrv_drained_begin(bs); | |
26bf15e4 | 1523 | bdrv_try_set_aio_context(bs, ctx_a, &error_abort); |
247d2737 KW |
1524 | |
1525 | aio_context_acquire(ctx_a); | |
1526 | bdrv_drained_end(bs); | |
1527 | ||
1528 | bdrv_drained_begin(bs); | |
26bf15e4 | 1529 | bdrv_try_set_aio_context(bs, ctx_b, &error_abort); |
247d2737 KW |
1530 | aio_context_release(ctx_a); |
1531 | aio_context_acquire(ctx_b); | |
26bf15e4 | 1532 | bdrv_try_set_aio_context(bs, qemu_get_aio_context(), &error_abort); |
247d2737 KW |
1533 | aio_context_release(ctx_b); |
1534 | bdrv_drained_end(bs); | |
1535 | ||
1536 | bdrv_unref(bs); | |
1537 | iothread_join(a); | |
1538 | iothread_join(b); | |
1539 | } | |
1540 | ||
8e442810 HR |
1541 | |
1542 | typedef struct TestDropBackingBlockJob { | |
1543 | BlockJob common; | |
1544 | bool should_complete; | |
1545 | bool *did_complete; | |
2afdc790 | 1546 | BlockDriverState *detach_also; |
8e442810 HR |
1547 | } TestDropBackingBlockJob; |
1548 | ||
1549 | static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp) | |
1550 | { | |
1551 | TestDropBackingBlockJob *s = | |
1552 | container_of(job, TestDropBackingBlockJob, common.job); | |
1553 | ||
1554 | while (!s->should_complete) { | |
1555 | job_sleep_ns(job, 0); | |
1556 | } | |
1557 | ||
1558 | return 0; | |
1559 | } | |
1560 | ||
1561 | static void test_drop_backing_job_commit(Job *job) | |
1562 | { | |
1563 | TestDropBackingBlockJob *s = | |
1564 | container_of(job, TestDropBackingBlockJob, common.job); | |
1565 | ||
1566 | bdrv_set_backing_hd(blk_bs(s->common.blk), NULL, &error_abort); | |
2afdc790 | 1567 | bdrv_set_backing_hd(s->detach_also, NULL, &error_abort); |
8e442810 HR |
1568 | |
1569 | *s->did_complete = true; | |
1570 | } | |
1571 | ||
1572 | static const BlockJobDriver test_drop_backing_job_driver = { | |
1573 | .job_driver = { | |
1574 | .instance_size = sizeof(TestDropBackingBlockJob), | |
1575 | .free = block_job_free, | |
1576 | .user_resume = block_job_user_resume, | |
1577 | .drain = block_job_drain, | |
1578 | .run = test_drop_backing_job_run, | |
1579 | .commit = test_drop_backing_job_commit, | |
1580 | } | |
1581 | }; | |
1582 | ||
1583 | /** | |
1584 | * Creates a child node with three parent nodes on it, and then runs a | |
1585 | * block job on the final one, parent-node-2. | |
1586 | * | |
8e442810 HR |
1587 | * The job is then asked to complete before a section where the child |
1588 | * is drained. | |
1589 | * | |
1590 | * Ending this section will undrain the child's parents, first | |
1591 | * parent-node-2, then parent-node-1, then parent-node-0 -- the parent | |
1592 | * list is in reverse order of how they were added. Ending the drain | |
1593 | * on parent-node-2 will resume the job, thus completing it and | |
1594 | * scheduling job_exit(). | |
1595 | * | |
1596 | * Ending the drain on parent-node-1 will poll the AioContext, which | |
1597 | * lets job_exit() and thus test_drop_backing_job_commit() run. That | |
2afdc790 | 1598 | * function first removes the child as parent-node-2's backing file. |
8e442810 HR |
1599 | * |
1600 | * In old (and buggy) implementations, there are two problems with | |
1601 | * that: | |
1602 | * (A) bdrv_drain_invoke() polls for every node that leaves the | |
1603 | * drained section. This means that job_exit() is scheduled | |
1604 | * before the child has left the drained section. Its | |
1605 | * quiesce_counter is therefore still 1 when it is removed from | |
1606 | * parent-node-2. | |
1607 | * | |
1608 | * (B) bdrv_replace_child_noperm() calls drained_end() on the old | |
1609 | * child's parents as many times as the child is quiesced. This | |
1610 | * means it will call drained_end() on parent-node-2 once. | |
1611 | * Because parent-node-2 is no longer quiesced at this point, this | |
1612 | * will fail. | |
1613 | * | |
1614 | * bdrv_replace_child_noperm() therefore must call drained_end() on | |
1615 | * the parent only if it really is still drained because the child is | |
1616 | * drained. | |
2afdc790 HR |
1617 | * |
1618 | * If removing the child from parent-node-2 was successful (as it should |
1619 | * be), test_drop_backing_job_commit() will then also remove the child | |
1620 | * from parent-node-0. | |
1621 | * | |
1622 | * With an old version of our drain infrastructure ((A) above), that | |
1623 | * resulted in the following flow: | |
1624 | * | |
1625 | * 1. child attempts to leave its drained section. The call recurses | |
1626 | * to its parents. | |
1627 | * | |
1628 | * 2. parent-node-2 leaves the drained section. Polling in | |
1629 | * bdrv_drain_invoke() will schedule job_exit(). | |
1630 | * | |
1631 | * 3. parent-node-1 leaves the drained section. Polling in | |
1632 | * bdrv_drain_invoke() will run job_exit(), thus disconnecting | |
1633 | * parent-node-0 from the child node. | |
1634 | * | |
1635 | * 4. bdrv_parent_drained_end() uses a QLIST_FOREACH_SAFE() loop to | |
1636 | * iterate over the parents. Thus, it now accesses the BdrvChild | |
1637 | * object that used to connect parent-node-0 and the child node. | |
1638 | * However, that object no longer exists, so it accesses a dangling | |
1639 | * pointer. | |
1640 | * | |
1641 | * The solution is to only poll once when running a bdrv_drained_end() | |
1642 | * operation, specifically at the end when all drained_end() | |
1643 | * operations for all involved nodes have been scheduled. | |
1644 | * Note that this also solves (A) above, thus hiding (B). | |
8e442810 HR |
1645 | */ |
1646 | static void test_blockjob_commit_by_drained_end(void) | |
1647 | { | |
1648 | BlockDriverState *bs_child, *bs_parents[3]; | |
1649 | TestDropBackingBlockJob *job; | |
1650 | bool job_has_completed = false; | |
1651 | int i; | |
1652 | ||
1653 | bs_child = bdrv_new_open_driver(&bdrv_test, "child-node", BDRV_O_RDWR, | |
1654 | &error_abort); | |
1655 | ||
1656 | for (i = 0; i < 3; i++) { | |
1657 | char name[32]; | |
1658 | snprintf(name, sizeof(name), "parent-node-%i", i); | |
1659 | bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR, | |
1660 | &error_abort); | |
1661 | bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort); | |
1662 | } | |
1663 | ||
1664 | job = block_job_create("job", &test_drop_backing_job_driver, NULL, | |
1665 | bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL, | |
1666 | &error_abort); | |
1667 | ||
2afdc790 | 1668 | job->detach_also = bs_parents[0]; |
8e442810 HR |
1669 | job->did_complete = &job_has_completed; |
1670 | ||
1671 | job_start(&job->common.job); | |
1672 | ||
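/* The job cannot finish while bs_child is drained; it completes (and its
 * commit callback runs) from the polling performed by bdrv_drained_end(). */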
1673 | job->should_complete = true; | |
1674 | bdrv_drained_begin(bs_child); | |
1675 | g_assert(!job_has_completed); | |
1676 | bdrv_drained_end(bs_child); | |
1677 | g_assert(job_has_completed); | |
1678 | ||
1679 | bdrv_unref(bs_parents[0]); | |
1680 | bdrv_unref(bs_parents[1]); | |
1681 | bdrv_unref(bs_parents[2]); | |
1682 | bdrv_unref(bs_child); | |
1683 | } | |
1684 | ||
9746b35c HR |
1685 | |
1686 | typedef struct TestSimpleBlockJob { | |
1687 | BlockJob common; | |
1688 | bool should_complete; | |
1689 | bool *did_complete; | |
1690 | } TestSimpleBlockJob; | |
1691 | ||
1692 | static int coroutine_fn test_simple_job_run(Job *job, Error **errp) | |
1693 | { | |
1694 | TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); | |
1695 | ||
1696 | while (!s->should_complete) { | |
1697 | job_sleep_ns(job, 0); | |
1698 | } | |
1699 | ||
1700 | return 0; | |
1701 | } | |
1702 | ||
1703 | static void test_simple_job_clean(Job *job) | |
1704 | { | |
1705 | TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job); | |
1706 | *s->did_complete = true; | |
1707 | } | |
1708 | ||
1709 | static const BlockJobDriver test_simple_job_driver = { | |
1710 | .job_driver = { | |
1711 | .instance_size = sizeof(TestSimpleBlockJob), | |
1712 | .free = block_job_free, | |
1713 | .user_resume = block_job_user_resume, | |
1714 | .drain = block_job_drain, | |
1715 | .run = test_simple_job_run, | |
1716 | .clean = test_simple_job_clean, | |
1717 | }, | |
1718 | }; | |
1719 | ||
1720 | static int drop_intermediate_poll_update_filename(BdrvChild *child, | |
1721 | BlockDriverState *new_base, | |
1722 | const char *filename, | |
1723 | Error **errp) | |
1724 | { | |
1725 | /* | |
1726 | * We are free to poll here, which may change the block graph, if | |
1727 | * it is not drained. | |
1728 | */ | |
1729 | ||
1730 | /* If the job is not drained: Complete it, schedule job_exit() */ | |
1731 | aio_poll(qemu_get_current_aio_context(), false); | |
1732 | /* If the job is not drained: Run job_exit(), finish the job */ | |
1733 | aio_poll(qemu_get_current_aio_context(), false); | |
1734 | ||
1735 | return 0; | |
1736 | } | |
1737 | ||
1738 | /** | |
1739 | * Test a poll in the midst of bdrv_drop_intermediate(). | |
1740 | * | |
1741 | * bdrv_drop_intermediate() calls BdrvChildRole.update_filename(), | |
1742 | * which can yield or poll. This may lead to graph changes, unless | |
1743 | * the whole subtree in question is drained. | |
1744 | * | |
1745 | * We test this on the following graph: | |
1746 | * | |
1747 | *                     Job | |
1748 | * | |
1749 | *                      | | |
1750 | *                     job | |
1751 | *                      | | |
1752 | *                      v | |
1753 | * | |
1754 | *                   job-node | |
1755 | * | |
1756 | *                      | | |
1757 | *                   backing | |
1758 | *                      | | |
1759 | *                      v | |
1760 | * | |
1761 | * node-2 --chain--> node-1 --chain--> node-0 | |
1762 | * | |
1763 | * We drop node-1 with bdrv_drop_intermediate(top=node-1, base=node-0). | |
1764 | * | |
1765 | * This first updates node-2's backing filename by invoking | |
1766 | * drop_intermediate_poll_update_filename(), which polls twice. This | |
1767 | * causes the job to finish, which in turn causes the job-node to be |
1768 | * deleted. | |
1769 | * | |
1770 | * bdrv_drop_intermediate() uses a QLIST_FOREACH_SAFE() loop, so it | |
1771 | * already has a pointer to the BdrvChild edge between job-node and | |
1772 | * node-1. When it tries to handle that edge, we probably get a | |
1773 | * segmentation fault because the object no longer exists. | |
1774 | * | |
1775 | * | |
1776 | * The solution is for bdrv_drop_intermediate() to drain top's | |
1777 | * subtree. This prevents graph changes from happening just because | |
1778 | * BdrvChildRole.update_filename() yields or polls. Thus, the block | |
1779 | * job is paused during that drained section and must finish before or | |
1780 | * after. | |
1781 | * | |
1782 | * (In addition, bdrv_replace_child() must keep the job paused.) | |
1783 | */ | |
1784 | static void test_drop_intermediate_poll(void) | |
1785 | { | |
1786 | static BdrvChildRole chain_child_role; | |
1787 | BlockDriverState *chain[3]; | |
1788 | TestSimpleBlockJob *job; | |
1789 | BlockDriverState *job_node; | |
1790 | bool job_has_completed = false; | |
1791 | int i; | |
1792 | int ret; | |
1793 | ||
1794 | chain_child_role = child_backing; | |
1795 | chain_child_role.update_filename = drop_intermediate_poll_update_filename; | |
1796 | ||
1797 | for (i = 0; i < 3; i++) { | |
1798 | char name[32]; | |
1799 | snprintf(name, 32, "node-%i", i); | |
1800 | ||
1801 | chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort); | |
1802 | } | |
1803 | ||
1804 | job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR, | |
1805 | &error_abort); | |
1806 | bdrv_set_backing_hd(job_node, chain[1], &error_abort); | |
1807 | ||
1808 | /* | |
1809 | * Establish the chain last, so the chain links are the first | |
1810 | * elements in the BDS.parents lists | |
1811 | */ | |
1812 | for (i = 0; i < 3; i++) { | |
1813 | if (i) { | |
1814 | /* Takes the reference to chain[i - 1] */ | |
1815 | chain[i]->backing = bdrv_attach_child(chain[i], chain[i - 1], | |
1816 | "chain", &chain_child_role, | |
1817 | &error_abort); | |
1818 | } | |
1819 | } | |
1820 | ||
1821 | job = block_job_create("job", &test_simple_job_driver, NULL, job_node, | |
1822 | 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); | |
1823 | ||
1824 | /* The job has a reference now */ | |
1825 | bdrv_unref(job_node); | |
1826 | ||
1827 | job->did_complete = &job_has_completed; | |
1828 | ||
1829 | job_start(&job->common.job); | |
1830 | job->should_complete = true; | |
1831 | ||
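/* bdrv_drop_intermediate() is expected to drain chain[1]'s subtree, so the
 * polls in drop_intermediate_poll_update_filename() must not let the job
 * finish and delete job-node mid-operation; the job can only complete once
 * the subtree is undrained again, which has happened by the time the call
 * returns. */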
1832 | g_assert(!job_has_completed); | |
1833 | ret = bdrv_drop_intermediate(chain[1], chain[0], NULL); | |
1834 | g_assert(ret == 0); | |
1835 | g_assert(job_has_completed); | |
1836 | ||
1837 | bdrv_unref(chain[2]); | |
1838 | } | |
1839 | ||
0513f984 HR |
1840 | |
1841 | typedef struct BDRVReplaceTestState { | |
1842 | bool was_drained; | |
1843 | bool was_undrained; | |
1844 | bool has_read; | |
1845 | ||
1846 | int drain_count; | |
1847 | ||
1848 | bool yield_before_read; | |
1849 | Coroutine *io_co; | |
1850 | Coroutine *drain_co; | |
1851 | } BDRVReplaceTestState; | |
1852 | ||
1853 | static void bdrv_replace_test_close(BlockDriverState *bs) | |
1854 | { | |
1855 | } | |
1856 | ||
1857 | /** | |
1858 | * If @bs has a backing file: | |
1859 | * Yield if .yield_before_read is true (and wait for drain_begin to | |
1860 | * wake us up). | |
1861 | * Forward the read to bs->backing. Set .has_read to true. | |
1862 | * If drain_begin has woken us, wake it in turn. | |
1863 | * | |
1864 | * Otherwise: | |
1865 | * Set .has_read to true and return success. | |
1866 | */ | |
1867 | static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs, | |
1868 | uint64_t offset, | |
1869 | uint64_t bytes, | |
1870 | QEMUIOVector *qiov, | |
1871 | int flags) | |
1872 | { | |
1873 | BDRVReplaceTestState *s = bs->opaque; | |
1874 | ||
1875 | if (bs->backing) { | |
1876 | int ret; | |
1877 | ||
1878 | g_assert(!s->drain_count); | |
1879 | ||
1880 | s->io_co = qemu_coroutine_self(); | |
1881 | if (s->yield_before_read) { | |
1882 | s->yield_before_read = false; | |
1883 | qemu_coroutine_yield(); | |
1884 | } | |
1885 | s->io_co = NULL; | |
1886 | ||
1887 | ret = bdrv_preadv(bs->backing, offset, qiov); | |
1888 | s->has_read = true; | |
1889 | ||
1890 | /* Wake up drain_co if it runs */ | |
1891 | if (s->drain_co) { | |
1892 | aio_co_wake(s->drain_co); | |
1893 | } | |
1894 | ||
1895 | return ret; | |
1896 | } | |
1897 | ||
1898 | s->has_read = true; | |
1899 | return 0; | |
1900 | } | |
1901 | ||
1902 | /** | |
1903 | * If .drain_count is 0, wake up .io_co if there is one; and set | |
1904 | * .was_drained. | |
1905 | * Increment .drain_count. | |
1906 | */ | |
1907 | static void coroutine_fn bdrv_replace_test_co_drain_begin(BlockDriverState *bs) | |
1908 | { | |
1909 | BDRVReplaceTestState *s = bs->opaque; | |
1910 | ||
1911 | if (!s->drain_count) { | |
1912 | /* Keep waking io_co up until it is done */ | |
1913 | s->drain_co = qemu_coroutine_self(); | |
1914 | while (s->io_co) { | |
1915 | aio_co_wake(s->io_co); | |
1916 | s->io_co = NULL; | |
1917 | qemu_coroutine_yield(); | |
1918 | } | |
1919 | s->drain_co = NULL; | |
1920 | ||
1921 | s->was_drained = true; | |
1922 | } | |
1923 | s->drain_count++; | |
1924 | } | |
1925 | ||
1926 | /** | |
1927 | * Reduce .drain_count, set .was_undrained once it reaches 0. | |
1928 | * If .drain_count reaches 0 and the node has a backing file, issue a | |
1929 | * read request. | |
1930 | */ | |
1931 | static void coroutine_fn bdrv_replace_test_co_drain_end(BlockDriverState *bs) | |
1932 | { | |
1933 | BDRVReplaceTestState *s = bs->opaque; | |
1934 | ||
1935 | g_assert(s->drain_count > 0); | |
1936 | if (!--s->drain_count) { | |
1937 | int ret; | |
1938 | ||
1939 | s->was_undrained = true; | |
1940 | ||
1941 | if (bs->backing) { | |
1942 | char data; | |
1943 | QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1); | |
1944 | ||
1945 | /* Queue a read request post-drain */ | |
1946 | ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0); | |
1947 | g_assert(ret >= 0); | |
1948 | } | |
1949 | } | |
1950 | } | |
1951 | ||
1952 | static BlockDriver bdrv_replace_test = { | |
1953 | .format_name = "replace_test", | |
1954 | .instance_size = sizeof(BDRVReplaceTestState), | |
1955 | ||
1956 | .bdrv_close = bdrv_replace_test_close, | |
1957 | .bdrv_co_preadv = bdrv_replace_test_co_preadv, | |
1958 | ||
1959 | .bdrv_co_drain_begin = bdrv_replace_test_co_drain_begin, | |
1960 | .bdrv_co_drain_end = bdrv_replace_test_co_drain_end, | |
1961 | ||
1962 | .bdrv_child_perm = bdrv_format_default_perms, | |
1963 | }; | |
1964 | ||
1965 | static void coroutine_fn test_replace_child_mid_drain_read_co(void *opaque) | |
1966 | { | |
1967 | int ret; | |
1968 | char data; | |
1969 | ||
1970 | ret = blk_co_pread(opaque, 0, 1, &data, 0); | |
1971 | g_assert(ret >= 0); | |
1972 | } | |
1973 | ||
1974 | /** | |
1975 | * We test two things: | |
1976 | * (1) bdrv_replace_child_noperm() must not undrain the parent if both | |
1977 | * children are drained. | |
1978 | * (2) bdrv_replace_child_noperm() must never flush I/O requests to a | |
1979 | * drained child. If the old child is drained, it must flush I/O | |
1980 | * requests after the new one has been attached. If the new child | |
1981 | * is drained, it must flush I/O requests before the old one is | |
1982 | * detached. | |
1983 | * | |
1984 | * To do so, we create one parent node and two child nodes; then | |
1985 | * attach one of the children (old_child_bs) to the parent, then | |
1986 | * drain both old_child_bs and new_child_bs according to | |
1987 | * old_drain_count and new_drain_count, respectively, and finally | |
1988 | * we invoke bdrv_replace_node() to replace old_child_bs by | |
1989 | * new_child_bs. | |
1990 | * | |
1991 | * The test block driver we use here (bdrv_replace_test) has a read | |
1992 | * function that: | |
1993 | * - For the parent node, can optionally yield, and then forwards the | |
1994 | * read to bdrv_preadv(), | |
1995 | * - For the child node, just returns immediately. | |
1996 | * | |
1997 | * If the read yields, the drain_begin function will wake it up. | |
1998 | * | |
1999 | * The drain_end function issues a read on the parent once it is fully | |
2000 | * undrained (which simulates requests starting to come in again). | |
2001 | */ | |
2002 | static void do_test_replace_child_mid_drain(int old_drain_count, | |
2003 | int new_drain_count) | |
2004 | { | |
2005 | BlockBackend *parent_blk; | |
2006 | BlockDriverState *parent_bs; | |
2007 | BlockDriverState *old_child_bs, *new_child_bs; | |
2008 | BDRVReplaceTestState *parent_s; | |
2009 | BDRVReplaceTestState *old_child_s, *new_child_s; | |
2010 | Coroutine *io_co; | |
2011 | int i; | |
2012 | ||
2013 | parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0, | |
2014 | &error_abort); | |
2015 | parent_s = parent_bs->opaque; | |
2016 | ||
2017 | parent_blk = blk_new(qemu_get_aio_context(), | |
2018 | BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL); | |
2019 | blk_insert_bs(parent_blk, parent_bs, &error_abort); | |
2020 | ||
2021 | old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0, | |
2022 | &error_abort); | |
2023 | new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0, | |
2024 | &error_abort); | |
2025 | old_child_s = old_child_bs->opaque; | |
2026 | new_child_s = new_child_bs->opaque; | |
2027 | ||
2028 | /* So that we can read something */ | |
2029 | parent_bs->total_sectors = 1; | |
2030 | old_child_bs->total_sectors = 1; | |
2031 | new_child_bs->total_sectors = 1; | |
2032 | ||
2033 | bdrv_ref(old_child_bs); | |
2034 | parent_bs->backing = bdrv_attach_child(parent_bs, old_child_bs, "child", | |
2035 | &child_backing, &error_abort); | |
2036 | ||
2037 | for (i = 0; i < old_drain_count; i++) { | |
2038 | bdrv_drained_begin(old_child_bs); | |
2039 | } | |
2040 | for (i = 0; i < new_drain_count; i++) { | |
2041 | bdrv_drained_begin(new_child_bs); | |
2042 | } | |
2043 | ||
2044 | if (!old_drain_count) { | |
2045 | /* | |
2046 | * Start a read operation that will yield, so it will not | |
2047 | * complete before the node is drained. | |
2048 | */ | |
2049 | parent_s->yield_before_read = true; | |
2050 | io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co, | |
2051 | parent_blk); | |
2052 | qemu_coroutine_enter(io_co); | |
2053 | } | |
2054 | ||
2055 | /* If we have started a read operation, it should have yielded */ | |
2056 | g_assert(!parent_s->has_read); | |
2057 | ||
2058 | /* Reset drained status so we can see what bdrv_replace_node() does */ | |
2059 | parent_s->was_drained = false; | |
2060 | parent_s->was_undrained = false; | |
2061 | ||
2062 | g_assert(parent_bs->quiesce_counter == old_drain_count); | |
2063 | bdrv_replace_node(old_child_bs, new_child_bs, &error_abort); | |
2064 | g_assert(parent_bs->quiesce_counter == new_drain_count); | |
2065 | ||
2066 | if (!old_drain_count && !new_drain_count) { | |
2067 | /* | |
2068 | * From undrained to undrained drains and undrains the parent, | |
2069 | * because bdrv_replace_node() contains a drained section for | |
2070 | * @old_child_bs. | |
2071 | */ | |
2072 | g_assert(parent_s->was_drained && parent_s->was_undrained); | |
2073 | } else if (!old_drain_count && new_drain_count) { | |
2074 | /* | |
2075 | * From undrained to drained should drain the parent and keep | |
2076 | * it that way. | |
2077 | */ | |
2078 | g_assert(parent_s->was_drained && !parent_s->was_undrained); | |
2079 | } else if (old_drain_count && !new_drain_count) { | |
2080 | /* | |
2081 | * From drained to undrained should undrain the parent and | |
2082 | * keep it that way. | |
2083 | */ | |
2084 | g_assert(!parent_s->was_drained && parent_s->was_undrained); | |
2085 | } else /* if (old_drain_count && new_drain_count) */ { | |
2086 | /* | |
2087 | * From drained to drained must not undrain the parent at any | |
2088 | * point | |
2089 | */ | |
2090 | g_assert(!parent_s->was_drained && !parent_s->was_undrained); | |
2091 | } | |
2092 | ||
2093 | if (!old_drain_count || !new_drain_count) { | |
2094 | /* | |
2095 | * If !old_drain_count, we have started a read request before | |
2096 | * bdrv_replace_node(). If !new_drain_count, the parent must | |
2097 | * have been undrained at some point, and | |
2098 | * bdrv_replace_test_co_drain_end() starts a read request | |
2099 | * then. | |
2100 | */ | |
2101 | g_assert(parent_s->has_read); | |
2102 | } else { | |
2103 | /* | |
2104 | * If the parent was never undrained, there is no way to start | |
2105 | * a read request. | |
2106 | */ | |
2107 | g_assert(!parent_s->has_read); | |
2108 | } | |
2109 | ||
2110 | /* A drained child must not have received any request */ | |
2111 | g_assert(!(old_drain_count && old_child_s->has_read)); | |
2112 | g_assert(!(new_drain_count && new_child_s->has_read)); | |
2113 | ||
2114 | for (i = 0; i < new_drain_count; i++) { | |
2115 | bdrv_drained_end(new_child_bs); | |
2116 | } | |
2117 | for (i = 0; i < old_drain_count; i++) { | |
2118 | bdrv_drained_end(old_child_bs); | |
2119 | } | |
2120 | ||
2121 | /* | |
2122 | * By now, bdrv_replace_test_co_drain_end() must have been called | |
2123 | * at some point while the new child was attached to the parent. | |
2124 | */ | |
2125 | g_assert(parent_s->has_read); | |
2126 | g_assert(new_child_s->has_read); | |
2127 | ||
2128 | blk_unref(parent_blk); | |
2129 | bdrv_unref(parent_bs); | |
2130 | bdrv_unref(old_child_bs); | |
2131 | bdrv_unref(new_child_bs); | |
2132 | } | |
2133 | ||
2134 | static void test_replace_child_mid_drain(void) | |
2135 | { | |
2136 | int old_drain_count, new_drain_count; | |
2137 | ||
2138 | for (old_drain_count = 0; old_drain_count < 2; old_drain_count++) { | |
2139 | for (new_drain_count = 0; new_drain_count < 2; new_drain_count++) { | |
2140 | do_test_replace_child_mid_drain(old_drain_count, new_drain_count); | |
2141 | } | |
2142 | } | |
2143 | } | |
2144 | ||
881cfd17 KW |
2145 | int main(int argc, char **argv) |
2146 | { | |
bb675689 KW |
2147 | int ret; |
2148 | ||
881cfd17 KW |
2149 | bdrv_init(); |
2150 | qemu_init_main_loop(&error_abort); | |
2151 | ||
2152 | g_test_init(&argc, &argv, NULL); | |
bb675689 | 2153 | qemu_event_init(&done_event, false); |
881cfd17 KW |
2154 | |
2155 | g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all); | |
86e1c840 | 2156 | g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain); |
d2a85d0f KW |
2157 | g_test_add_func("/bdrv-drain/driver-cb/drain_subtree", |
2158 | test_drv_cb_drain_subtree); | |
881cfd17 | 2159 | |
6d0252f2 KW |
2160 | g_test_add_func("/bdrv-drain/driver-cb/co/drain_all", |
2161 | test_drv_cb_co_drain_all); | |
0582eb10 KW |
2162 | g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain); |
2163 | g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree", | |
2164 | test_drv_cb_co_drain_subtree); | |
2165 | ||
2166 | ||
89a6ceab KW |
2167 | g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all); |
2168 | g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain); | |
d2a85d0f KW |
2169 | g_test_add_func("/bdrv-drain/quiesce/drain_subtree", |
2170 | test_quiesce_drain_subtree); | |
89a6ceab | 2171 | |
6d0252f2 KW |
2172 | g_test_add_func("/bdrv-drain/quiesce/co/drain_all", |
2173 | test_quiesce_co_drain_all); | |
0582eb10 KW |
2174 | g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain); |
2175 | g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree", | |
2176 | test_quiesce_co_drain_subtree); | |
2177 | ||
6c429a6a | 2178 | g_test_add_func("/bdrv-drain/nested", test_nested); |
27e64474 | 2179 | g_test_add_func("/bdrv-drain/multiparent", test_multiparent); |
19f7a7e5 KW |
2180 | |
2181 | g_test_add_func("/bdrv-drain/graph-change/drain_subtree", | |
2182 | test_graph_change_drain_subtree); | |
2183 | g_test_add_func("/bdrv-drain/graph-change/drain_all", | |
2184 | test_graph_change_drain_all); | |
6c429a6a | 2185 | |
bb675689 KW |
2186 | g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all); |
2187 | g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain); | |
2188 | g_test_add_func("/bdrv-drain/iothread/drain_subtree", | |
2189 | test_iothread_drain_subtree); | |
2190 | ||
7253220d KW |
2191 | g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all); |
2192 | g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain); | |
d2a85d0f KW |
2193 | g_test_add_func("/bdrv-drain/blockjob/drain_subtree", |
2194 | test_blockjob_drain_subtree); | |
7253220d | 2195 | |
d49725af KW |
2196 | g_test_add_func("/bdrv-drain/blockjob/error/drain_all", |
2197 | test_blockjob_error_drain_all); | |
2198 | g_test_add_func("/bdrv-drain/blockjob/error/drain", | |
2199 | test_blockjob_error_drain); | |
2200 | g_test_add_func("/bdrv-drain/blockjob/error/drain_subtree", | |
2201 | test_blockjob_error_drain_subtree); | |
2202 | ||
f62c1729 KW |
2203 | g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all", |
2204 | test_blockjob_iothread_drain_all); | |
2205 | g_test_add_func("/bdrv-drain/blockjob/iothread/drain", | |
2206 | test_blockjob_iothread_drain); | |
2207 | g_test_add_func("/bdrv-drain/blockjob/iothread/drain_subtree", | |
2208 | test_blockjob_iothread_drain_subtree); | |
2209 | ||
d49725af KW |
2210 | g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all", |
2211 | test_blockjob_iothread_error_drain_all); | |
2212 | g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain", | |
2213 | test_blockjob_iothread_error_drain); | |
2214 | g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_subtree", | |
2215 | test_blockjob_iothread_error_drain_subtree); | |
2216 | ||
ebd31837 | 2217 | g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain); |
19f7a7e5 | 2218 | g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all); |
ebd31837 KW |
2219 | g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain); |
2220 | g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree); | |
231281ab | 2221 | g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb); |
57320ca9 | 2222 | g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb); |
4c8158e3 | 2223 | |
b994c5bc KW |
2224 | g_test_add_func("/bdrv-drain/attach/drain", test_append_to_drained); |
2225 | ||
247d2737 KW |
2226 | g_test_add_func("/bdrv-drain/set_aio_context", test_set_aio_context); |
2227 | ||
8e442810 HR |
2228 | g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end", |
2229 | test_blockjob_commit_by_drained_end); | |
2230 | ||
9746b35c HR |
2231 | g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll", |
2232 | test_drop_intermediate_poll); | |
2233 | ||
0513f984 HR |
2234 | g_test_add_func("/bdrv-drain/replace_child/mid-drain", |
2235 | test_replace_child_mid_drain); | |
2236 | ||
bb675689 KW |
2237 | ret = g_test_run(); |
2238 | qemu_event_destroy(&done_event); | |
2239 | return ret; | |
881cfd17 | 2240 | } |