/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
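/* With BDRV_SECTOR_BITS == 9, this evaluates to 32768 * 512 = 16 MiB. */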

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, atomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}
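
/*
 * Each bdrv_parent_drained_begin_single() call increments
 * c->parent_quiesce_counter and must eventually be paired with a matching
 * bdrv_parent_drained_end_single(), which decrements the counter again
 * (asserting that it was positive).
 */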

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
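
/*
 * MIN_NON_ZERO treats 0 as "no limit": merging max_transfer values of 0 and
 * 65536 yields 65536, and 131072 with 65536 also yields 65536. Alignments, by
 * contrast, merge with MAX so the stricter requirement wins.
 */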

void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    atomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = atomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
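
/*
 * Illustration only: two independent users can overlap without clobbering
 * each other's state.
 *
 *     bdrv_enable_copy_on_read(bs);     // user A: 0 -> 1, enabled
 *     bdrv_enable_copy_on_read(bs);     // user B: 1 -> 2
 *     bdrv_disable_copy_on_read(bs);    // user A: 2 -> 1, still enabled
 *     bdrv_disable_copy_on_read(bs);    // user B: 1 -> 0, disabled
 */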

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    atomic_mb_set(&data->done, true);
    if (!data->begin) {
        atomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        atomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (atomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        AioContext *co_ctx = qemu_coroutine_get_aio_context(co);

        /*
         * When the coroutine yielded, the lock for its home context was
         * released, so we need to re-acquire it here. If it explicitly
         * acquired a different context, the lock is still held and we don't
         * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
         */
        if (ctx == co_ctx) {
            aio_context_acquire(ctx);
        }
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        if (ctx == co_ctx) {
            aio_context_release(ctx);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }
    replay_bh_schedule_oneshot_event(bdrv_get_aio_context(bs),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions. The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles. Therefore, the pointer must remain valid
 * until the pointee reaches 0. That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
}
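
/*
 * Typical drained-section usage (illustration only):
 *
 *     bdrv_drained_begin(bs);   // quiesce parents, wait for in-flight I/O
 *     ...                       // modify state that must not race with I/O
 *     bdrv_drained_end(bs);     // resume, poll until background ends settle
 */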

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, atomic_read(&drained_end_counter) > 0);
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(atomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be endless
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, atomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
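
/*
 * bdrv_drain_all() is thus a full quiesce/resume cycle with no work in
 * between: useful when the caller only needs the guarantee that every request
 * submitted before the call has completed.
 */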

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        atomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  uint64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, uint64_t bytes)
{
    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
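
/*
 * Example: with req->overlap_offset == 4096 and req->overlap_bytes == 4096,
 * the tracked range is the half-open interval [4096, 8192); a request
 * covering [0, 4097) overlaps it, while [8192, 12288) does not.
 */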

static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BlockDriverState *bs,
                                      BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);
    return waited;
}

bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    BlockDriverState *bs = req->bs;
    int64_t overlap_offset = req->offset & ~(align - 1);
    uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                           - overlap_offset;
    bool waited;

    qemu_co_mutex_lock(&bs->reqs_lock);
    if (!req->serialising) {
        atomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
    waited = bdrv_wait_serialising_requests_locked(bs, req);
    qemu_co_mutex_unlock(&bs->reqs_lock);
    return waited;
}

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
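
/*
 * Example: with a 64 KiB cluster size, offset == 70000 and bytes == 1000 are
 * rounded out to *cluster_offset == 65536 and *cluster_bytes == 65536, i.e.
 * the single cluster containing the whole request.
 */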

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    atomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    atomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!atomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(bs, self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
                                   size_t size)
{
    if (size > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    if (!bdrv_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (offset < 0) {
        return -EIO;
    }

    return 0;
}
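
/*
 * BDRV_REQUEST_MAX_BYTES is just under 2 GiB (INT_MAX rounded down to sector
 * alignment), so a request that passes this check cannot overflow an int
 * byte count.
 */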

typedef struct RwCo {
    BdrvChild *child;
    int64_t offset;
    QEMUIOVector *qiov;
    bool is_write;
    int ret;
    BdrvRequestFlags flags;
} RwCo;

static void coroutine_fn bdrv_rw_co_entry(void *opaque)
{
    RwCo *rwco = opaque;

    if (!rwco->is_write) {
        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
                                   rwco->qiov->size, rwco->qiov,
                                   rwco->flags);
    } else {
        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
                                    rwco->qiov->size, rwco->qiov,
                                    rwco->flags);
    }
    aio_wait_kick();
}

/*
 * Process a vectored synchronous request using coroutines
 */
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
                        QEMUIOVector *qiov, bool is_write,
                        BdrvRequestFlags flags)
{
    Coroutine *co;
    RwCo rwco = {
        .child = child,
        .offset = offset,
        .qiov = qiov,
        .is_write = is_write,
        .ret = NOT_DONE,
        .flags = flags,
    };

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_rw_co_entry(&rwco);
    } else {
        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
        bdrv_coroutine_enter(child->bs, co);
        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
    }
    return rwco.ret;
}
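
/*
 * The synchronous wrappers below (bdrv_preadv(), bdrv_pread(), bdrv_pwritev(),
 * bdrv_pwrite(), ...) all funnel through bdrv_prwv_co(): either directly when
 * already in coroutine context, or by spawning a coroutine and polling until
 * it completes.
 */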

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

    return bdrv_prwv_co(child, offset, &qiov, true,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

/* Returns < 0 on error. See bdrv_pwrite() for the return codes */
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_preadv(child, offset, &qiov);
}

int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
    int ret;

    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
    if (ret < 0) {
        return ret;
    }

    return qiov->size;
}

/* Return no. of bytes on success or < 0 on error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid offset or number of bytes
  -EACCES      Trying to write a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    return bdrv_pwritev(child, offset, &qiov);
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
                                           uint64_t offset, uint64_t bytes,
                                           QEMUIOVector *qiov,
                                           size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
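
/*
 * Driver interface selection above, in order of preference:
 * bdrv_co_preadv_part (byte-based, qiov-offset aware), bdrv_co_preadv
 * (byte-based), bdrv_aio_preadv (callback-based), and finally the legacy
 * sector-based bdrv_co_readv. bdrv_driver_pwritev() below mirrors this
 * ordering for writes.
 */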

static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                            uint64_t offset, uint64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;

    assert(!(flags & ~BDRV_REQ_MASK));
    assert(!(flags & BDRV_REQ_NO_FALLBACK));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
                                   flags & bs->supported_write_flags);
        flags &= ~bs->supported_write_flags;
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
                                    flags & bs->supported_write_flags,
                                    bdrv_co_io_em_complete, &co);
        flags &= ~bs->supported_write_flags;
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
                              flags & bs->supported_write_flags);
    flags &= ~bs->supported_write_flags;

emulate_flags:
    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn
bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
                               uint64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}

static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
        size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t cluster_offset;
    int64_t cluster_bytes;
    size_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    unsigned int progress = 0;
    bool skip_write;

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive. That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file. Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
    skip_bytes = offset - cluster_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   cluster_offset, cluster_bytes);

    while (cluster_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(cluster_bytes, max_transfer);
        } else {
            ret = bdrv_is_allocated(bs, cluster_offset,
                                    MIN(cluster_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(cluster_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests. If this is a deliberate copy-on-read
                 * then we don't want to ignore the error. Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        cluster_offset += pnum;
        cluster_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    uint64_t bytes_remaining = bytes;
    int max_transfer;

    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /* TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers. For now, there aren't any
     * passthrough flags. */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap. This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster. For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, 0);
        goto out;
    }

    while (bytes_remaining) {
        int num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov, bytes - bytes_remaining, 0);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
                                    bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf ... )                               [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    QEMUIOVector local_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              BdrvRequestPadding *pad)
{
    uint64_t align = bs->bl.request_alignment;
    size_t sum;

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    return true;
}

static int bdrv_padding_rmw_read(BdrvChild *child,
                                 BdrvTrackedRequest *req,
                                 BdrvRequestPadding *pad,
                                 bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        uint64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

static void bdrv_padding_destroy(BdrvRequestPadding *pad)
{
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
}

/*
 * bdrv_pad_request
 *
 * Exchange request parameters with padded request if needed. Don't include RMW
 * read of padding, bdrv_padding_rmw_read() should be called separately if
 * needed.
 *
 * All parameters except @bs are in-out: they represent original request at
 * function call and padded (if padding needed) at function finish.
 *
 * Function always succeeds.
 */
static bool bdrv_pad_request(BlockDriverState *bs,
                             QEMUIOVector **qiov, size_t *qiov_offset,
                             int64_t *offset, unsigned int *bytes,
                             BdrvRequestPadding *pad)
{
    if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
        return false;
    }

    qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
                             *qiov, *qiov_offset, *bytes,
                             pad->buf + pad->buf_len - pad->tail, pad->tail);
    *bytes += pad->head + pad->tail;
    *offset -= pad->head;
    *qiov = &pad->local_qiov;
    *qiov_offset = 0;

    return true;
}
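
/*
 * Worked example (illustration only): with align == 512, offset == 1000 and
 * bytes == 2000, head == 1000 % 512 == 488 and tail == 512 - (3000 % 512) ==
 * 72, so bdrv_pad_request() turns the request into offset == 512,
 * bytes == 2560 (five aligned sectors), with pad->buf supplying the 488 head
 * bytes and 72 tail bytes around the caller's qiov.
 */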

int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags)
{
    return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
}

int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
    int64_t offset, unsigned int bytes,
    QEMUIOVector *qiov, size_t qiov_offset,
    BdrvRequestFlags flags)
{
    BlockDriverState *bs = child->bs;
    BdrvTrackedRequest req;
    BdrvRequestPadding pad;
    int ret;

    trace_bdrv_co_preadv(bs, offset, bytes, flags);

    ret = bdrv_check_byte_request(bs, offset, bytes);
    if (ret < 0) {
        return ret;
    }

    if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
        /*
         * Aligning a zero-length request is nonsense. Even if a driver assigns
         * special meaning to zero-length requests (like
         * qcow2_co_pwritev_compressed_part), we cannot pass such a request on
         * to the driver due to request_alignment.
         *
         * Still, there is no reason to return an error if someone happens to
         * issue an unaligned zero-length read.
         */
        return 0;
    }

    bdrv_inc_in_flight(bs);

    /* Don't do copy-on-read if we read data before write operation */
    if (atomic_read(&bs->copy_on_read)) {
        flags |= BDRV_REQ_COPY_ON_READ;
    }

    bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad);

    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
    ret = bdrv_aligned_preadv(child, &req, offset, bytes,
                              bs->bl.request_alignment,
                              qiov, qiov_offset, flags);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    bdrv_padding_destroy(&pad);

    return ret;
}
1744
d05aa8bb 1745static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
f5a5ca79 1746 int64_t offset, int bytes, BdrvRequestFlags flags)
61007b31
SH
1747{
1748 BlockDriver *drv = bs->drv;
1749 QEMUIOVector qiov;
0d93ed08 1750 void *buf = NULL;
61007b31 1751 int ret = 0;
465fe887 1752 bool need_flush = false;
443668ca
DL
1753 int head = 0;
1754 int tail = 0;
61007b31 1755
cf081fca 1756 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
a5b8dd2c
EB
1757 int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1758 bs->bl.request_alignment);
cb2e2878 1759 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
d05aa8bb 1760
d470ad42
HR
1761 if (!drv) {
1762 return -ENOMEDIUM;
1763 }
1764
fe0480d6
KW
1765 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1766 return -ENOTSUP;
1767 }
1768
b8d0a980
EB
1769 assert(alignment % bs->bl.request_alignment == 0);
1770 head = offset % alignment;
f5a5ca79 1771 tail = (offset + bytes) % alignment;
b8d0a980
EB
1772 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1773 assert(max_write_zeroes >= bs->bl.request_alignment);
61007b31 1774
f5a5ca79
MP
1775 while (bytes > 0 && !ret) {
1776 int num = bytes;
61007b31
SH
1777
1778 /* Align request. Block drivers can expect the "bulk" of the request
443668ca
DL
1779 * to be aligned, and that unaligned requests do not cross cluster
1780 * boundaries.
61007b31 1781 */
443668ca 1782 if (head) {
b2f95fee
EB
1783 /* Make a small request up to the first aligned sector. For
1784 * convenience, limit this request to max_transfer even if
1785 * we don't need to fall back to writes. */
f5a5ca79 1786 num = MIN(MIN(bytes, max_transfer), alignment - head);
b2f95fee
EB
1787 head = (head + num) % alignment;
1788 assert(num < max_write_zeroes);
d05aa8bb 1789 } else if (tail && num > alignment) {
443668ca
DL
1790 /* Shorten the request to the last aligned sector. */
1791 num -= tail;
61007b31
SH
1792 }
1793
1794 /* limit request size */
1795 if (num > max_write_zeroes) {
1796 num = max_write_zeroes;
1797 }
1798
1799 ret = -ENOTSUP;
1800 /* First try the efficient write zeroes operation */
d05aa8bb
EB
1801 if (drv->bdrv_co_pwrite_zeroes) {
1802 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1803 flags & bs->supported_zero_flags);
1804 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1805 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1806 need_flush = true;
1807 }
465fe887
EB
1808 } else {
1809 assert(!bs->supported_zero_flags);
61007b31
SH
1810 }
1811
294682cc 1812 if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
61007b31 1813 /* Fall back to bounce buffer if write zeroes is unsupported */
465fe887
EB
1814 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1815
1816 if ((flags & BDRV_REQ_FUA) &&
1817 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1818 /* No need for bdrv_driver_pwrite() to do a fallback
1819 * flush on each chunk; use just one at the end */
1820 write_flags &= ~BDRV_REQ_FUA;
1821 need_flush = true;
1822 }
5def6b80 1823 num = MIN(num, max_transfer);
0d93ed08
VSO
1824 if (buf == NULL) {
1825 buf = qemu_try_blockalign0(bs, num);
1826 if (buf == NULL) {
61007b31
SH
1827 ret = -ENOMEM;
1828 goto fail;
1829 }
61007b31 1830 }
0d93ed08 1831 qemu_iovec_init_buf(&qiov, buf, num);
61007b31 1832
ac850bf0 1833 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
61007b31
SH
1834
 1835 /* Keep the bounce buffer around if it is big enough for
 1836 * all future requests.
1837 */
5def6b80 1838 if (num < max_transfer) {
0d93ed08
VSO
1839 qemu_vfree(buf);
1840 buf = NULL;
61007b31
SH
1841 }
1842 }
1843
d05aa8bb 1844 offset += num;
f5a5ca79 1845 bytes -= num;
61007b31
SH
1846 }
1847
1848fail:
465fe887
EB
1849 if (ret == 0 && need_flush) {
1850 ret = bdrv_co_flush(bs);
1851 }
0d93ed08 1852 qemu_vfree(buf);
61007b31
SH
1853 return ret;
1854}
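/*
 * Illustrative sketch (editor-added, standalone; not io.c code): the chunking
 * order produced by the loop in bdrv_co_do_pwrite_zeroes() above -- a short
 * unaligned head piece first, then aligned bulk chunks capped at
 * max_write_zeroes, then the unaligned tail. The max_transfer clamp on the
 * head piece is omitted for brevity; all constants are made up.
 */
#include <stdio.h>

int main(void)
{
    long long offset = 1000, bytes = 200000;
    int alignment = 4096;
    int max_write_zeroes = 65536;        /* already aligned down */
    int head = offset % alignment;
    int tail = (offset + bytes) % alignment;

    while (bytes > 0) {
        long long num = bytes;

        if (head) {
            /* small request up to the first aligned sector */
            num = num < alignment - head ? num : alignment - head;
            head = (head + num) % alignment;
        } else if (tail && num > alignment) {
            /* shorten the request to the last aligned sector */
            num -= tail;
        }
        if (num > max_write_zeroes) {
            num = max_write_zeroes;      /* limit request size */
        }
        printf("zero chunk: offset=%lld num=%lld\n", offset, num);
        offset += num;
        bytes -= num;
    }
    return 0;
}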
1855
85fe2479
FZ
1856static inline int coroutine_fn
1857bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
1858 BdrvTrackedRequest *req, int flags)
1859{
1860 BlockDriverState *bs = child->bs;
1861 bool waited;
1862 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1863
1864 if (bs->read_only) {
1865 return -EPERM;
1866 }
1867
85fe2479
FZ
1868 assert(!(bs->open_flags & BDRV_O_INACTIVE));
1869 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1870 assert(!(flags & ~BDRV_REQ_MASK));
1871
1872 if (flags & BDRV_REQ_SERIALISING) {
18fbd0de
PB
1873 waited = bdrv_mark_request_serialising(req, bdrv_get_cluster_size(bs));
1874 /*
1875 * For a misaligned request we should have already waited earlier,
1876 * because we come after bdrv_padding_rmw_read which must be called
1877 * with the request already marked as serialising.
1878 */
1879 assert(!waited ||
1880 (req->offset == req->overlap_offset &&
1881 req->bytes == req->overlap_bytes));
1882 } else {
1883 bdrv_wait_serialising_requests(req);
85fe2479
FZ
1884 }
1885
85fe2479
FZ
1886 assert(req->overlap_offset <= offset);
1887 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
cd47d792 1888 assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
85fe2479 1889
cd47d792
FZ
1890 switch (req->type) {
1891 case BDRV_TRACKED_WRITE:
1892 case BDRV_TRACKED_DISCARD:
1893 if (flags & BDRV_REQ_WRITE_UNCHANGED) {
1894 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1895 } else {
1896 assert(child->perm & BLK_PERM_WRITE);
1897 }
1898 return notifier_with_return_list_notify(&bs->before_write_notifiers,
1899 req);
1900 case BDRV_TRACKED_TRUNCATE:
1901 assert(child->perm & BLK_PERM_RESIZE);
1902 return 0;
1903 default:
1904 abort();
85fe2479 1905 }
85fe2479
FZ
1906}
1907
1908static inline void coroutine_fn
1909bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
1910 BdrvTrackedRequest *req, int ret)
1911{
1912 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1913 BlockDriverState *bs = child->bs;
1914
1915 atomic_inc(&bs->write_gen);
85fe2479 1916
00695c27
FZ
1917 /*
1918 * Discard cannot extend the image, but in error handling cases, such as
 1919 * when reverting a qcow2 cluster allocation, the discarded range can extend
 1920 * past the end of the image file, so we cannot assert about BDRV_TRACKED_DISCARD
1921 * here. Instead, just skip it, since semantically a discard request
1922 * beyond EOF cannot expand the image anyway.
1923 */
7f8f03ef 1924 if (ret == 0 &&
cd47d792
FZ
1925 (req->type == BDRV_TRACKED_TRUNCATE ||
1926 end_sector > bs->total_sectors) &&
1927 req->type != BDRV_TRACKED_DISCARD) {
7f8f03ef
FZ
1928 bs->total_sectors = end_sector;
1929 bdrv_parent_cb_resize(bs);
1930 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
85fe2479 1931 }
00695c27
FZ
1932 if (req->bytes) {
1933 switch (req->type) {
1934 case BDRV_TRACKED_WRITE:
1935 stat64_max(&bs->wr_highest_offset, offset + bytes);
1936 /* fall through, to set dirty bits */
1937 case BDRV_TRACKED_DISCARD:
1938 bdrv_set_dirty(bs, offset, bytes);
1939 break;
1940 default:
1941 break;
1942 }
1943 }
85fe2479
FZ
1944}
1945
61007b31 1946/*
04ed95f4
EB
1947 * Forwards an already correctly aligned write request to the BlockDriver,
1948 * after possibly fragmenting it.
61007b31 1949 */
85c97ca7 1950static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
61007b31 1951 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
28c4da28 1952 int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
61007b31 1953{
85c97ca7 1954 BlockDriverState *bs = child->bs;
61007b31 1955 BlockDriver *drv = bs->drv;
61007b31
SH
1956 int ret;
1957
04ed95f4
EB
1958 uint64_t bytes_remaining = bytes;
1959 int max_transfer;
61007b31 1960
d470ad42
HR
1961 if (!drv) {
1962 return -ENOMEDIUM;
1963 }
1964
d6883bc9
VSO
1965 if (bdrv_has_readonly_bitmaps(bs)) {
1966 return -EPERM;
1967 }
1968
cff86b38
EB
1969 assert(is_power_of_2(align));
1970 assert((offset & (align - 1)) == 0);
1971 assert((bytes & (align - 1)) == 0);
28c4da28 1972 assert(!qiov || qiov_offset + bytes <= qiov->size);
04ed95f4
EB
1973 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1974 align);
61007b31 1975
85fe2479 1976 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
61007b31
SH
1977
1978 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
c1499a5e 1979 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
28c4da28 1980 qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
61007b31
SH
1981 flags |= BDRV_REQ_ZERO_WRITE;
1982 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1983 flags |= BDRV_REQ_MAY_UNMAP;
1984 }
1985 }
1986
1987 if (ret < 0) {
1988 /* Do nothing, write notifier decided to fail this request */
1989 } else if (flags & BDRV_REQ_ZERO_WRITE) {
9a4f4c31 1990 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
9896c876 1991 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
3ea1a091 1992 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
28c4da28
VSO
1993 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
1994 qiov, qiov_offset);
04ed95f4 1995 } else if (bytes <= max_transfer) {
9a4f4c31 1996 bdrv_debug_event(bs, BLKDBG_PWRITEV);
28c4da28 1997 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
04ed95f4
EB
1998 } else {
1999 bdrv_debug_event(bs, BLKDBG_PWRITEV);
2000 while (bytes_remaining) {
2001 int num = MIN(bytes_remaining, max_transfer);
04ed95f4
EB
2002 int local_flags = flags;
2003
2004 assert(num);
2005 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
2006 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
2007 /* If FUA is going to be emulated by flush, we only
2008 * need to flush on the last iteration */
2009 local_flags &= ~BDRV_REQ_FUA;
2010 }
04ed95f4
EB
2011
2012 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
28c4da28
VSO
2013 num, qiov, bytes - bytes_remaining,
2014 local_flags);
04ed95f4
EB
2015 if (ret < 0) {
2016 break;
2017 }
2018 bytes_remaining -= num;
2019 }
61007b31 2020 }
9a4f4c31 2021 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
61007b31 2022
61007b31 2023 if (ret >= 0) {
04ed95f4 2024 ret = 0;
61007b31 2025 }
85fe2479 2026 bdrv_co_write_req_finish(child, offset, bytes, req, ret);
61007b31
SH
2027
2028 return ret;
2029}
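/*
 * Illustrative sketch (editor-added, standalone; not io.c code): how the
 * fragmentation loop in bdrv_aligned_pwritev() above keeps BDRV_REQ_FUA only
 * on the final chunk when the driver lacks native FUA support, so at most one
 * emulated flush is needed. The flag value and sizes are made up.
 */
#include <stdio.h>

#define EX_REQ_FUA 0x10   /* stand-in for BDRV_REQ_FUA */

int main(void)
{
    unsigned long long bytes = 300000, bytes_remaining;
    int max_transfer = 131072;   /* aligned down, as in the code above */
    int flags = EX_REQ_FUA;      /* caller requested FUA */

    for (bytes_remaining = bytes; bytes_remaining; ) {
        int num = bytes_remaining < (unsigned long long)max_transfer
                  ? (int)bytes_remaining : max_transfer;
        int local_flags = flags;

        if ((unsigned long long)num < bytes_remaining) {
            /* Not the last chunk: defer FUA to one flush at the end */
            local_flags &= ~EX_REQ_FUA;
        }
        printf("write chunk: offset=%llu num=%d fua=%d\n",
               bytes - bytes_remaining, num, !!(local_flags & EX_REQ_FUA));
        bytes_remaining -= num;
    }
    return 0;
}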
2030
85c97ca7 2031static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
9eeb6dd1
FZ
2032 int64_t offset,
2033 unsigned int bytes,
2034 BdrvRequestFlags flags,
2035 BdrvTrackedRequest *req)
2036{
85c97ca7 2037 BlockDriverState *bs = child->bs;
9eeb6dd1 2038 QEMUIOVector local_qiov;
a5b8dd2c 2039 uint64_t align = bs->bl.request_alignment;
9eeb6dd1 2040 int ret = 0;
7a3f542f
VSO
2041 bool padding;
2042 BdrvRequestPadding pad;
9eeb6dd1 2043
7a3f542f
VSO
2044 padding = bdrv_init_padding(bs, offset, bytes, &pad);
2045 if (padding) {
304d9d7f 2046 bdrv_mark_request_serialising(req, align);
9eeb6dd1 2047
7a3f542f
VSO
2048 bdrv_padding_rmw_read(child, req, &pad, true);
2049
2050 if (pad.head || pad.merge_reads) {
2051 int64_t aligned_offset = offset & ~(align - 1);
2052 int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
2053
2054 qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
2055 ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
28c4da28 2056 align, &local_qiov, 0,
7a3f542f
VSO
2057 flags & ~BDRV_REQ_ZERO_WRITE);
2058 if (ret < 0 || pad.merge_reads) {
2059 /* Error or all work is done */
2060 goto out;
2061 }
2062 offset += write_bytes - pad.head;
2063 bytes -= write_bytes - pad.head;
9eeb6dd1 2064 }
9eeb6dd1
FZ
2065 }
2066
2067 assert(!bytes || (offset & (align - 1)) == 0);
2068 if (bytes >= align) {
2069 /* Write the aligned part in the middle. */
2070 uint64_t aligned_bytes = bytes & ~(align - 1);
85c97ca7 2071 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
28c4da28 2072 NULL, 0, flags);
9eeb6dd1 2073 if (ret < 0) {
7a3f542f 2074 goto out;
9eeb6dd1
FZ
2075 }
2076 bytes -= aligned_bytes;
2077 offset += aligned_bytes;
2078 }
2079
2080 assert(!bytes || (offset & (align - 1)) == 0);
2081 if (bytes) {
7a3f542f 2082 assert(align == pad.tail + bytes);
9eeb6dd1 2083
7a3f542f 2084 qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
85c97ca7 2085 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
28c4da28
VSO
2086 &local_qiov, 0,
2087 flags & ~BDRV_REQ_ZERO_WRITE);
9eeb6dd1 2088 }
9eeb6dd1 2089
7a3f542f
VSO
2090out:
2091 bdrv_padding_destroy(&pad);
2092
2093 return ret;
9eeb6dd1
FZ
2094}
2095
61007b31
SH
2096/*
2097 * Handle a write request in coroutine context
2098 */
a03ef88f 2099int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
61007b31
SH
2100 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
2101 BdrvRequestFlags flags)
1acc3466
VSO
2102{
2103 return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2104}
2105
2106int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2107 int64_t offset, unsigned int bytes, QEMUIOVector *qiov, size_t qiov_offset,
2108 BdrvRequestFlags flags)
61007b31 2109{
a03ef88f 2110 BlockDriverState *bs = child->bs;
61007b31 2111 BdrvTrackedRequest req;
a5b8dd2c 2112 uint64_t align = bs->bl.request_alignment;
7a3f542f 2113 BdrvRequestPadding pad;
61007b31
SH
2114 int ret;
2115
f42cf447
DB
2116 trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
2117
61007b31
SH
2118 if (!bs->drv) {
2119 return -ENOMEDIUM;
2120 }
61007b31
SH
2121
2122 ret = bdrv_check_byte_request(bs, offset, bytes);
2123 if (ret < 0) {
2124 return ret;
2125 }
2126
f2208fdc
AG
2127 /* If the request is misaligned then we can't make it efficient */
2128 if ((flags & BDRV_REQ_NO_FALLBACK) &&
2129 !QEMU_IS_ALIGNED(offset | bytes, align))
2130 {
2131 return -ENOTSUP;
2132 }
2133
ac9d00bf
VSO
2134 if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2135 /*
 2136 * Aligning a zero-length request is nonsense. Even if the driver has a
 2137 * special meaning for zero length (like qcow2_co_pwritev_compressed_part),
 2138 * we can't pass it to the driver due to request_alignment.
 2139 *
 2140 * Still, there is no reason to return an error if someone does an
 2141 * unaligned zero-length write occasionally.
2142 */
2143 return 0;
2144 }
2145
99723548 2146 bdrv_inc_in_flight(bs);
61007b31
SH
2147 /*
2148 * Align write if necessary by performing a read-modify-write cycle.
2149 * Pad qiov with the read parts and be sure to have a tracked request not
2150 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
2151 */
ebde595c 2152 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
61007b31 2153
18a59f03 2154 if (flags & BDRV_REQ_ZERO_WRITE) {
85c97ca7 2155 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
9eeb6dd1
FZ
2156 goto out;
2157 }
2158
1acc3466 2159 if (bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad)) {
304d9d7f 2160 bdrv_mark_request_serialising(&req, align);
7a3f542f 2161 bdrv_padding_rmw_read(child, &req, &pad, false);
61007b31
SH
2162 }
2163
85c97ca7 2164 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
1acc3466 2165 qiov, qiov_offset, flags);
61007b31 2166
7a3f542f 2167 bdrv_padding_destroy(&pad);
61007b31 2168
9eeb6dd1
FZ
2169out:
2170 tracked_request_end(&req);
99723548 2171 bdrv_dec_in_flight(bs);
7a3f542f 2172
61007b31
SH
2173 return ret;
2174}
2175
a03ef88f 2176int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
f5a5ca79 2177 int bytes, BdrvRequestFlags flags)
61007b31 2178{
f5a5ca79 2179 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
61007b31 2180
a03ef88f 2181 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
61007b31
SH
2182 flags &= ~BDRV_REQ_MAY_UNMAP;
2183 }
61007b31 2184
f5a5ca79 2185 return bdrv_co_pwritev(child, offset, bytes, NULL,
74021bc4 2186 BDRV_REQ_ZERO_WRITE | flags);
61007b31
SH
2187}
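/*
 * Usage sketch (editor-added, hypothetical caller; not io.c code): zero out
 * the first megabyte of a node from coroutine context. BDRV_REQ_MAY_UNMAP
 * permits hole punching; per the check above it is dropped automatically when
 * the node was opened without BDRV_O_UNMAP.
 */
static int coroutine_fn example_zero_first_mb(BdrvChild *child)
{
    return bdrv_co_pwrite_zeroes(child, 0, 1024 * 1024, BDRV_REQ_MAY_UNMAP);
}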
2188
4085f5c7
JS
2189/*
 2190 * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend or not.
2191 */
2192int bdrv_flush_all(void)
2193{
2194 BdrvNextIterator it;
2195 BlockDriverState *bs = NULL;
2196 int result = 0;
2197
c8aa7895
PD
2198 /*
 2199 * The bdrv queue is managed by record/replay;
 2200 * creating a new flush request while stopping
 2201 * the VM may break determinism.
2202 */
2203 if (replay_events_enabled()) {
2204 return result;
2205 }
2206
4085f5c7
JS
2207 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2208 AioContext *aio_context = bdrv_get_aio_context(bs);
2209 int ret;
2210
2211 aio_context_acquire(aio_context);
2212 ret = bdrv_flush(bs);
2213 if (ret < 0 && !result) {
2214 result = ret;
2215 }
2216 aio_context_release(aio_context);
2217 }
2218
2219 return result;
2220}
2221
2222
4bcd936e 2223typedef struct BdrvCoBlockStatusData {
61007b31
SH
2224 BlockDriverState *bs;
2225 BlockDriverState *base;
c9ce8c4d 2226 bool want_zero;
4bcd936e
EB
2227 int64_t offset;
2228 int64_t bytes;
2229 int64_t *pnum;
2230 int64_t *map;
c9ce8c4d 2231 BlockDriverState **file;
4bcd936e 2232 int ret;
61007b31 2233 bool done;
4bcd936e 2234} BdrvCoBlockStatusData;
61007b31 2235
3e4d0e72
EB
2236int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
2237 bool want_zero,
2238 int64_t offset,
2239 int64_t bytes,
2240 int64_t *pnum,
2241 int64_t *map,
2242 BlockDriverState **file)
f7cc69b3
MP
2243{
2244 assert(bs->file && bs->file->bs);
3e4d0e72
EB
2245 *pnum = bytes;
2246 *map = offset;
f7cc69b3 2247 *file = bs->file->bs;
3e4d0e72 2248 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
f7cc69b3
MP
2249}
2250
3e4d0e72
EB
2251int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
2252 bool want_zero,
2253 int64_t offset,
2254 int64_t bytes,
2255 int64_t *pnum,
2256 int64_t *map,
2257 BlockDriverState **file)
f7cc69b3
MP
2258{
2259 assert(bs->backing && bs->backing->bs);
3e4d0e72
EB
2260 *pnum = bytes;
2261 *map = offset;
f7cc69b3 2262 *file = bs->backing->bs;
3e4d0e72 2263 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
f7cc69b3
MP
2264}
2265
61007b31
SH
2266/*
2267 * Returns the allocation status of the specified sectors.
2268 * Drivers not implementing the functionality are assumed to not support
2269 * backing files, hence all their sectors are reported as allocated.
2270 *
86a3d5c6
EB
2271 * If 'want_zero' is true, the caller is querying for mapping
2272 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2273 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2274 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
c9ce8c4d 2275 *
2e8bc787 2276 * If 'offset' is beyond the end of the disk image the return value is
fb0d8654 2277 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
61007b31 2278 *
2e8bc787 2279 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
fb0d8654
EB
2280 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2281 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
67a0fd2a 2282 *
2e8bc787
EB
2283 * 'pnum' is set to the number of bytes (including and immediately
2284 * following the specified offset) that are easily known to be in the
2285 * same allocated/unallocated state. Note that a second call starting
2286 * at the original offset plus returned pnum may have the same status.
2287 * The returned value is non-zero on success except at end-of-file.
2288 *
2289 * Returns negative errno on failure. Otherwise, if the
2290 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2291 * set to the host mapping and BDS corresponding to the guest offset.
61007b31 2292 */
2e8bc787
EB
2293static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2294 bool want_zero,
2295 int64_t offset, int64_t bytes,
2296 int64_t *pnum, int64_t *map,
2297 BlockDriverState **file)
2298{
2299 int64_t total_size;
2300 int64_t n; /* bytes */
efa6e2ed 2301 int ret;
2e8bc787 2302 int64_t local_map = 0;
298a1665 2303 BlockDriverState *local_file = NULL;
efa6e2ed
EB
2304 int64_t aligned_offset, aligned_bytes;
2305 uint32_t align;
61007b31 2306
298a1665
EB
2307 assert(pnum);
2308 *pnum = 0;
2e8bc787
EB
2309 total_size = bdrv_getlength(bs);
2310 if (total_size < 0) {
2311 ret = total_size;
298a1665 2312 goto early_out;
61007b31
SH
2313 }
2314
2e8bc787 2315 if (offset >= total_size) {
298a1665
EB
2316 ret = BDRV_BLOCK_EOF;
2317 goto early_out;
61007b31 2318 }
2e8bc787 2319 if (!bytes) {
298a1665
EB
2320 ret = 0;
2321 goto early_out;
9cdcfd9f 2322 }
61007b31 2323
2e8bc787
EB
2324 n = total_size - offset;
2325 if (n < bytes) {
2326 bytes = n;
61007b31
SH
2327 }
2328
d470ad42
HR
2329 /* Must be non-NULL or bdrv_getlength() would have failed */
2330 assert(bs->drv);
636cb512 2331 if (!bs->drv->bdrv_co_block_status) {
2e8bc787 2332 *pnum = bytes;
61007b31 2333 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2e8bc787 2334 if (offset + bytes == total_size) {
fb0d8654
EB
2335 ret |= BDRV_BLOCK_EOF;
2336 }
61007b31 2337 if (bs->drv->protocol_name) {
2e8bc787
EB
2338 ret |= BDRV_BLOCK_OFFSET_VALID;
2339 local_map = offset;
298a1665 2340 local_file = bs;
61007b31 2341 }
298a1665 2342 goto early_out;
61007b31
SH
2343 }
2344
99723548 2345 bdrv_inc_in_flight(bs);
efa6e2ed
EB
2346
2347 /* Round out to request_alignment boundaries */
86a3d5c6 2348 align = bs->bl.request_alignment;
efa6e2ed
EB
2349 aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2350 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2351
636cb512
EB
2352 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2353 aligned_bytes, pnum, &local_map,
2354 &local_file);
2355 if (ret < 0) {
2356 *pnum = 0;
2357 goto out;
efa6e2ed
EB
2358 }
2359
2e8bc787 2360 /*
636cb512 2361 * The driver's result must be a non-zero multiple of request_alignment.
efa6e2ed 2362 * Clamp pnum and adjust map to original request.
2e8bc787 2363 */
636cb512
EB
2364 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2365 align > offset - aligned_offset);
69f47505
VSO
2366 if (ret & BDRV_BLOCK_RECURSE) {
2367 assert(ret & BDRV_BLOCK_DATA);
2368 assert(ret & BDRV_BLOCK_OFFSET_VALID);
2369 assert(!(ret & BDRV_BLOCK_ZERO));
2370 }
2371
efa6e2ed
EB
2372 *pnum -= offset - aligned_offset;
2373 if (*pnum > bytes) {
2374 *pnum = bytes;
61007b31 2375 }
2e8bc787 2376 if (ret & BDRV_BLOCK_OFFSET_VALID) {
efa6e2ed 2377 local_map += offset - aligned_offset;
2e8bc787 2378 }
61007b31
SH
2379
2380 if (ret & BDRV_BLOCK_RAW) {
298a1665 2381 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2e8bc787
EB
2382 ret = bdrv_co_block_status(local_file, want_zero, local_map,
2383 *pnum, pnum, &local_map, &local_file);
99723548 2384 goto out;
61007b31
SH
2385 }
2386
2387 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2388 ret |= BDRV_BLOCK_ALLOCATED;
c9ce8c4d 2389 } else if (want_zero) {
61007b31
SH
2390 if (bdrv_unallocated_blocks_are_zero(bs)) {
2391 ret |= BDRV_BLOCK_ZERO;
760e0063
KW
2392 } else if (bs->backing) {
2393 BlockDriverState *bs2 = bs->backing->bs;
2e8bc787 2394 int64_t size2 = bdrv_getlength(bs2);
c9ce8c4d 2395
2e8bc787 2396 if (size2 >= 0 && offset >= size2) {
61007b31
SH
2397 ret |= BDRV_BLOCK_ZERO;
2398 }
2399 }
2400 }
2401
69f47505
VSO
2402 if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2403 local_file && local_file != bs &&
61007b31
SH
2404 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2405 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2e8bc787
EB
2406 int64_t file_pnum;
2407 int ret2;
61007b31 2408
2e8bc787
EB
2409 ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2410 *pnum, &file_pnum, NULL, NULL);
61007b31
SH
2411 if (ret2 >= 0) {
 2412 /* Ignore errors. This is just providing extra information;
 2413 * it is useful but not necessary.
2414 */
c61e684e
EB
2415 if (ret2 & BDRV_BLOCK_EOF &&
2416 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2417 /*
2418 * It is valid for the format block driver to read
2419 * beyond the end of the underlying file's current
2420 * size; such areas read as zero.
2421 */
61007b31
SH
2422 ret |= BDRV_BLOCK_ZERO;
2423 } else {
2424 /* Limit request to the range reported by the protocol driver */
2425 *pnum = file_pnum;
2426 ret |= (ret2 & BDRV_BLOCK_ZERO);
2427 }
2428 }
2429 }
2430
99723548
PB
2431out:
2432 bdrv_dec_in_flight(bs);
2e8bc787 2433 if (ret >= 0 && offset + *pnum == total_size) {
fb0d8654
EB
2434 ret |= BDRV_BLOCK_EOF;
2435 }
298a1665
EB
2436early_out:
2437 if (file) {
2438 *file = local_file;
2439 }
2e8bc787
EB
2440 if (map) {
2441 *map = local_map;
2442 }
61007b31
SH
2443 return ret;
2444}
2445
5b648c67
EB
2446static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2447 BlockDriverState *base,
2448 bool want_zero,
2449 int64_t offset,
2450 int64_t bytes,
2451 int64_t *pnum,
2452 int64_t *map,
2453 BlockDriverState **file)
ba3f0e25
FZ
2454{
2455 BlockDriverState *p;
5b648c67 2456 int ret = 0;
c61e684e 2457 bool first = true;
ba3f0e25
FZ
2458
2459 assert(bs != base);
760e0063 2460 for (p = bs; p != base; p = backing_bs(p)) {
5b648c67
EB
2461 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2462 file);
c61e684e
EB
2463 if (ret < 0) {
2464 break;
2465 }
2466 if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
2467 /*
2468 * Reading beyond the end of the file continues to read
2469 * zeroes, but we can only widen the result to the
2470 * unallocated length we learned from an earlier
2471 * iteration.
2472 */
5b648c67 2473 *pnum = bytes;
c61e684e
EB
2474 }
2475 if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
ba3f0e25
FZ
2476 break;
2477 }
5b648c67
EB
2478 /* [offset, pnum] unallocated on this layer, which could be only
2479 * the first part of [offset, bytes]. */
2480 bytes = MIN(bytes, *pnum);
c61e684e 2481 first = false;
ba3f0e25
FZ
2482 }
2483 return ret;
2484}
2485
31826642 2486/* Coroutine wrapper for bdrv_block_status_above() */
5b648c67 2487static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
61007b31 2488{
4bcd936e 2489 BdrvCoBlockStatusData *data = opaque;
61007b31 2490
5b648c67
EB
2491 data->ret = bdrv_co_block_status_above(data->bs, data->base,
2492 data->want_zero,
2493 data->offset, data->bytes,
2494 data->pnum, data->map, data->file);
61007b31 2495 data->done = true;
4720cbee 2496 aio_wait_kick();
61007b31
SH
2497}
2498
2499/*
5b648c67 2500 * Synchronous wrapper around bdrv_co_block_status_above().
61007b31 2501 *
5b648c67 2502 * See bdrv_co_block_status_above() for details.
61007b31 2503 */
7ddb99b9
EB
2504static int bdrv_common_block_status_above(BlockDriverState *bs,
2505 BlockDriverState *base,
2506 bool want_zero, int64_t offset,
2507 int64_t bytes, int64_t *pnum,
2508 int64_t *map,
2509 BlockDriverState **file)
61007b31
SH
2510{
2511 Coroutine *co;
4bcd936e 2512 BdrvCoBlockStatusData data = {
61007b31 2513 .bs = bs,
ba3f0e25 2514 .base = base,
c9ce8c4d 2515 .want_zero = want_zero,
7ddb99b9
EB
2516 .offset = offset,
2517 .bytes = bytes,
2518 .pnum = pnum,
2519 .map = map,
c9ce8c4d 2520 .file = file,
61007b31
SH
2521 .done = false,
2522 };
2523
2524 if (qemu_in_coroutine()) {
2525 /* Fast-path if already in coroutine context */
5b648c67 2526 bdrv_block_status_above_co_entry(&data);
61007b31 2527 } else {
5b648c67 2528 co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
e92f0e19 2529 bdrv_coroutine_enter(bs, co);
88b062c2 2530 BDRV_POLL_WHILE(bs, !data.done);
61007b31 2531 }
7ddb99b9 2532 return data.ret;
61007b31
SH
2533}
2534
31826642
EB
2535int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2536 int64_t offset, int64_t bytes, int64_t *pnum,
2537 int64_t *map, BlockDriverState **file)
c9ce8c4d 2538{
31826642
EB
2539 return bdrv_common_block_status_above(bs, base, true, offset, bytes,
2540 pnum, map, file);
c9ce8c4d
EB
2541}
2542
237d78f8
EB
2543int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2544 int64_t *pnum, int64_t *map, BlockDriverState **file)
ba3f0e25 2545{
31826642
EB
2546 return bdrv_block_status_above(bs, backing_bs(bs),
2547 offset, bytes, pnum, map, file);
ba3f0e25
FZ
2548}
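/*
 * Usage sketch (editor-added, hypothetical helper; not io.c code): walk a
 * node's allocation map with bdrv_block_status(), in the spirit of
 * "qemu-img map". It relies only on the semantics documented above: each call
 * reports *pnum bytes of uniform status, and the walk terminates because
 * *pnum is only 0 at end-of-file.
 */
static void example_print_map(BlockDriverState *bs)
{
    int64_t offset = 0;
    int64_t size = bdrv_getlength(bs);

    while (offset < size) {
        int64_t pnum, map;
        BlockDriverState *file;
        int ret = bdrv_block_status(bs, offset, size - offset,
                                    &pnum, &map, &file);

        if (ret < 0 || pnum == 0) {
            break;
        }
        printf("%12lld +%-12lld data=%d zero=%d\n",
               (long long)offset, (long long)pnum,
               !!(ret & BDRV_BLOCK_DATA), !!(ret & BDRV_BLOCK_ZERO));
        offset += pnum;
    }
}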
2549
d6a644bb
EB
2550int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2551 int64_t bytes, int64_t *pnum)
61007b31 2552{
7ddb99b9
EB
2553 int ret;
2554 int64_t dummy;
d6a644bb 2555
7ddb99b9
EB
2556 ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
2557 bytes, pnum ? pnum : &dummy, NULL,
c9ce8c4d 2558 NULL);
61007b31
SH
2559 if (ret < 0) {
2560 return ret;
2561 }
2562 return !!(ret & BDRV_BLOCK_ALLOCATED);
2563}
2564
2565/*
2566 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2567 *
170d3bd3
AS
2568 * Return 1 if (a prefix of) the given range is allocated in any image
2569 * between BASE and TOP (BASE is only included if include_base is set).
2570 * BASE can be NULL to check if the given offset is allocated in any
2571 * image of the chain. Return 0 otherwise, or negative errno on
2572 * failure.
61007b31 2573 *
51b0a488
EB
2574 * 'pnum' is set to the number of bytes (including and immediately
2575 * following the specified offset) that are known to be in the same
2576 * allocated/unallocated state. Note that a subsequent call starting
2577 * at 'offset + *pnum' may return the same allocation status (in other
2578 * words, the result is not necessarily the maximum possible range);
2579 * but 'pnum' will only be 0 when end of file is reached.
61007b31
SH
2580 *
2581 */
2582int bdrv_is_allocated_above(BlockDriverState *top,
2583 BlockDriverState *base,
170d3bd3
AS
2584 bool include_base, int64_t offset,
2585 int64_t bytes, int64_t *pnum)
61007b31
SH
2586{
2587 BlockDriverState *intermediate;
51b0a488
EB
2588 int ret;
2589 int64_t n = bytes;
61007b31 2590
170d3bd3
AS
2591 assert(base || !include_base);
2592
61007b31 2593 intermediate = top;
170d3bd3 2594 while (include_base || intermediate != base) {
d6a644bb 2595 int64_t pnum_inter;
c00716be 2596 int64_t size_inter;
d6a644bb 2597
170d3bd3 2598 assert(intermediate);
51b0a488 2599 ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
61007b31
SH
2600 if (ret < 0) {
2601 return ret;
d6a644bb 2602 }
d6a644bb 2603 if (ret) {
51b0a488 2604 *pnum = pnum_inter;
61007b31
SH
2605 return 1;
2606 }
2607
51b0a488 2608 size_inter = bdrv_getlength(intermediate);
c00716be
EB
2609 if (size_inter < 0) {
2610 return size_inter;
2611 }
51b0a488
EB
2612 if (n > pnum_inter &&
2613 (intermediate == top || offset + pnum_inter < size_inter)) {
2614 n = pnum_inter;
61007b31
SH
2615 }
2616
170d3bd3
AS
2617 if (intermediate == base) {
2618 break;
2619 }
2620
760e0063 2621 intermediate = backing_bs(intermediate);
61007b31
SH
2622 }
2623
2624 *pnum = n;
2625 return 0;
2626}
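/*
 * Usage sketch (editor-added, hypothetical; not io.c code): decide whether a
 * stream/commit-style job must copy the first 64k cluster, i.e. whether any
 * image strictly above 'base' already supplies it. Error handling is elided;
 * negative returns would need their own treatment.
 */
static bool example_cluster_needs_copy(BlockDriverState *top,
                                       BlockDriverState *base)
{
    int64_t pnum;
    int ret = bdrv_is_allocated_above(top, base, false, 0, 65536, &pnum);

    return ret > 0;   /* 1: allocated above base; 0: base shows through */
}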
2627
1a8ae822
KW
2628typedef struct BdrvVmstateCo {
2629 BlockDriverState *bs;
2630 QEMUIOVector *qiov;
2631 int64_t pos;
2632 bool is_read;
2633 int ret;
2634} BdrvVmstateCo;
2635
2636static int coroutine_fn
2637bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2638 bool is_read)
2639{
2640 BlockDriver *drv = bs->drv;
dc88a467
SH
2641 int ret = -ENOTSUP;
2642
2643 bdrv_inc_in_flight(bs);
1a8ae822
KW
2644
2645 if (!drv) {
dc88a467 2646 ret = -ENOMEDIUM;
1a8ae822 2647 } else if (drv->bdrv_load_vmstate) {
dc88a467
SH
2648 if (is_read) {
2649 ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2650 } else {
2651 ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2652 }
1a8ae822 2653 } else if (bs->file) {
dc88a467 2654 ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
1a8ae822
KW
2655 }
2656
dc88a467
SH
2657 bdrv_dec_in_flight(bs);
2658 return ret;
1a8ae822
KW
2659}
2660
2661static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
2662{
2663 BdrvVmstateCo *co = opaque;
2664 co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
4720cbee 2665 aio_wait_kick();
1a8ae822
KW
2666}
2667
2668static inline int
2669bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2670 bool is_read)
2671{
2672 if (qemu_in_coroutine()) {
2673 return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
2674 } else {
2675 BdrvVmstateCo data = {
2676 .bs = bs,
2677 .qiov = qiov,
2678 .pos = pos,
2679 .is_read = is_read,
2680 .ret = -EINPROGRESS,
2681 };
0b8b8753 2682 Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
1a8ae822 2683
e92f0e19 2684 bdrv_coroutine_enter(bs, co);
ea17c9d2 2685 BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
1a8ae822
KW
2686 return data.ret;
2687 }
2688}
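/*
 * Pattern sketch (editor-added; not io.c code): the shape shared by the
 * synchronous wrappers in this file -- bdrv_rw_vmstate() above and
 * bdrv_flush()/bdrv_pdiscard() below. Run the coroutine body directly when
 * already in coroutine context; otherwise spawn a coroutine on the node's
 * AioContext and poll until it reports completion.
 */
typedef struct ExampleCo {
    BlockDriverState *bs;
    int ret;
} ExampleCo;

static void coroutine_fn example_co_entry(void *opaque)
{
    ExampleCo *eco = opaque;

    eco->ret = 0;        /* ... the bdrv_co_*() body would run here ... */
    aio_wait_kick();     /* wake the BDRV_POLL_WHILE() below */
}

static int example_sync_wrapper(BlockDriverState *bs)
{
    ExampleCo eco = { .bs = bs, .ret = NOT_DONE };

    if (qemu_in_coroutine()) {
        example_co_entry(&eco);   /* fast path, no extra coroutine */
    } else {
        Coroutine *co = qemu_coroutine_create(example_co_entry, &eco);
        bdrv_coroutine_enter(bs, co);
        BDRV_POLL_WHILE(bs, eco.ret == NOT_DONE);
    }
    return eco.ret;
}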
2689
61007b31
SH
2690int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2691 int64_t pos, int size)
2692{
0d93ed08 2693 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
b433d942 2694 int ret;
61007b31 2695
b433d942
KW
2696 ret = bdrv_writev_vmstate(bs, &qiov, pos);
2697 if (ret < 0) {
2698 return ret;
2699 }
2700
2701 return size;
61007b31
SH
2702}
2703
2704int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2705{
1a8ae822 2706 return bdrv_rw_vmstate(bs, qiov, pos, false);
61007b31
SH
2707}
2708
2709int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2710 int64_t pos, int size)
5ddda0b8 2711{
0d93ed08 2712 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
b433d942 2713 int ret;
5ddda0b8 2714
b433d942
KW
2715 ret = bdrv_readv_vmstate(bs, &qiov, pos);
2716 if (ret < 0) {
2717 return ret;
2718 }
2719
2720 return size;
5ddda0b8
KW
2721}
2722
2723int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
61007b31 2724{
1a8ae822 2725 return bdrv_rw_vmstate(bs, qiov, pos, true);
61007b31
SH
2726}
2727
2728/**************************************************************/
2729/* async I/Os */
2730
61007b31
SH
2731void bdrv_aio_cancel(BlockAIOCB *acb)
2732{
2733 qemu_aio_ref(acb);
2734 bdrv_aio_cancel_async(acb);
2735 while (acb->refcnt > 1) {
2736 if (acb->aiocb_info->get_aio_context) {
2737 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2738 } else if (acb->bs) {
2f47da5f
PB
2739 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2740 * assert that we're not using an I/O thread. Thread-safe
2741 * code should use bdrv_aio_cancel_async exclusively.
2742 */
2743 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
61007b31
SH
2744 aio_poll(bdrv_get_aio_context(acb->bs), true);
2745 } else {
2746 abort();
2747 }
2748 }
2749 qemu_aio_unref(acb);
2750}
2751
2752/* Async version of aio cancel. The caller is not blocked if the acb implements
 2753 * cancel_async; otherwise we do nothing and let the request complete normally.
2754 * In either case the completion callback must be called. */
2755void bdrv_aio_cancel_async(BlockAIOCB *acb)
2756{
2757 if (acb->aiocb_info->cancel_async) {
2758 acb->aiocb_info->cancel_async(acb);
2759 }
2760}
2761
61007b31
SH
2762/**************************************************************/
2763/* Coroutine block device emulation */
2764
e293b7a3
KW
2765typedef struct FlushCo {
2766 BlockDriverState *bs;
2767 int ret;
2768} FlushCo;
2769
2770
61007b31
SH
2771static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2772{
e293b7a3 2773 FlushCo *rwco = opaque;
61007b31
SH
2774
2775 rwco->ret = bdrv_co_flush(rwco->bs);
4720cbee 2776 aio_wait_kick();
61007b31
SH
2777}
2778
2779int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2780{
49ca6259
FZ
2781 int current_gen;
2782 int ret = 0;
2783
2784 bdrv_inc_in_flight(bs);
61007b31 2785
e914404e 2786 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
1b6bc94d 2787 bdrv_is_sg(bs)) {
49ca6259 2788 goto early_exit;
61007b31
SH
2789 }
2790
3783fa3d 2791 qemu_co_mutex_lock(&bs->reqs_lock);
47fec599 2792 current_gen = atomic_read(&bs->write_gen);
3ff2f67a
EY
2793
2794 /* Wait until any previous flushes are completed */
99723548 2795 while (bs->active_flush_req) {
3783fa3d 2796 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
3ff2f67a
EY
2797 }
2798
3783fa3d 2799 /* Flushes reach this point in nondecreasing current_gen order. */
99723548 2800 bs->active_flush_req = true;
3783fa3d 2801 qemu_co_mutex_unlock(&bs->reqs_lock);
3ff2f67a 2802
c32b82af
PD
2803 /* Write back all layers by calling one driver function */
2804 if (bs->drv->bdrv_co_flush) {
2805 ret = bs->drv->bdrv_co_flush(bs);
2806 goto out;
2807 }
2808
61007b31
SH
2809 /* Write back cached data to the OS even with cache=unsafe */
2810 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2811 if (bs->drv->bdrv_co_flush_to_os) {
2812 ret = bs->drv->bdrv_co_flush_to_os(bs);
2813 if (ret < 0) {
cdb5e315 2814 goto out;
61007b31
SH
2815 }
2816 }
2817
2818 /* But don't actually force it to the disk with cache=unsafe */
2819 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2820 goto flush_parent;
2821 }
2822
3ff2f67a
EY
2823 /* Check if we really need to flush anything */
2824 if (bs->flushed_gen == current_gen) {
2825 goto flush_parent;
2826 }
2827
61007b31 2828 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
d470ad42
HR
2829 if (!bs->drv) {
2830 /* bs->drv->bdrv_co_flush() might have ejected the BDS
2831 * (even in case of apparent success) */
2832 ret = -ENOMEDIUM;
2833 goto out;
2834 }
61007b31
SH
2835 if (bs->drv->bdrv_co_flush_to_disk) {
2836 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2837 } else if (bs->drv->bdrv_aio_flush) {
2838 BlockAIOCB *acb;
2839 CoroutineIOCompletion co = {
2840 .coroutine = qemu_coroutine_self(),
2841 };
2842
2843 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2844 if (acb == NULL) {
2845 ret = -EIO;
2846 } else {
2847 qemu_coroutine_yield();
2848 ret = co.ret;
2849 }
2850 } else {
2851 /*
2852 * Some block drivers always operate in either writethrough or unsafe
 2853 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2854 * know how the server works (because the behaviour is hardcoded or
2855 * depends on server-side configuration), so we can't ensure that
2856 * everything is safe on disk. Returning an error doesn't work because
2857 * that would break guests even if the server operates in writethrough
2858 * mode.
2859 *
2860 * Let's hope the user knows what he's doing.
2861 */
2862 ret = 0;
2863 }
3ff2f67a 2864
61007b31 2865 if (ret < 0) {
cdb5e315 2866 goto out;
61007b31
SH
2867 }
2868
2869 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2870 * in the case of cache=unsafe, so there are no useless flushes.
2871 */
2872flush_parent:
cdb5e315
FZ
2873 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2874out:
3ff2f67a 2875 /* Notify any pending flushes that we have completed */
e6af1e08
KW
2876 if (ret == 0) {
2877 bs->flushed_gen = current_gen;
2878 }
3783fa3d
PB
2879
2880 qemu_co_mutex_lock(&bs->reqs_lock);
99723548 2881 bs->active_flush_req = false;
156af3ac
DL
2882 /* Return value is ignored - it's ok if wait queue is empty */
2883 qemu_co_queue_next(&bs->flush_queue);
3783fa3d 2884 qemu_co_mutex_unlock(&bs->reqs_lock);
3ff2f67a 2885
49ca6259 2886early_exit:
99723548 2887 bdrv_dec_in_flight(bs);
cdb5e315 2888 return ret;
61007b31
SH
2889}
2890
2891int bdrv_flush(BlockDriverState *bs)
2892{
2893 Coroutine *co;
e293b7a3 2894 FlushCo flush_co = {
61007b31
SH
2895 .bs = bs,
2896 .ret = NOT_DONE,
2897 };
2898
2899 if (qemu_in_coroutine()) {
2900 /* Fast-path if already in coroutine context */
e293b7a3 2901 bdrv_flush_co_entry(&flush_co);
61007b31 2902 } else {
0b8b8753 2903 co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
e92f0e19 2904 bdrv_coroutine_enter(bs, co);
88b062c2 2905 BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
61007b31
SH
2906 }
2907
e293b7a3 2908 return flush_co.ret;
61007b31
SH
2909}
2910
2911typedef struct DiscardCo {
0b9fd3f4 2912 BdrvChild *child;
0c51a893 2913 int64_t offset;
d93e5726 2914 int64_t bytes;
61007b31
SH
2915 int ret;
2916} DiscardCo;
0c51a893 2917static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
61007b31
SH
2918{
2919 DiscardCo *rwco = opaque;
2920
0b9fd3f4 2921 rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
4720cbee 2922 aio_wait_kick();
61007b31
SH
2923}
2924
d93e5726
VSO
2925int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
2926 int64_t bytes)
61007b31 2927{
b1066c87 2928 BdrvTrackedRequest req;
9f1963b3 2929 int max_pdiscard, ret;
3482b9bc 2930 int head, tail, align;
0b9fd3f4 2931 BlockDriverState *bs = child->bs;
61007b31 2932
d93e5726 2933 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
61007b31
SH
2934 return -ENOMEDIUM;
2935 }
2936
d6883bc9
VSO
2937 if (bdrv_has_readonly_bitmaps(bs)) {
2938 return -EPERM;
2939 }
2940
d93e5726
VSO
2941 if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) {
2942 return -EIO;
61007b31
SH
2943 }
2944
61007b31
SH
2945 /* Do nothing if disabled. */
2946 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2947 return 0;
2948 }
2949
02aefe43 2950 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
61007b31
SH
2951 return 0;
2952 }
2953
3482b9bc
EB
2954 /* Discard is advisory, but some devices track and coalesce
2955 * unaligned requests, so we must pass everything down rather than
 2956 * rounding here. Still, most devices will just silently ignore
2957 * unaligned requests (by returning -ENOTSUP), so we must fragment
2958 * the request accordingly. */
02aefe43 2959 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
b8d0a980
EB
2960 assert(align % bs->bl.request_alignment == 0);
2961 head = offset % align;
f5a5ca79 2962 tail = (offset + bytes) % align;
9f1963b3 2963
99723548 2964 bdrv_inc_in_flight(bs);
f5a5ca79 2965 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
50824995 2966
00695c27 2967 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
ec050f77
DL
2968 if (ret < 0) {
2969 goto out;
2970 }
2971
9f1963b3
EB
2972 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
2973 align);
3482b9bc 2974 assert(max_pdiscard >= bs->bl.request_alignment);
61007b31 2975
f5a5ca79 2976 while (bytes > 0) {
d93e5726 2977 int64_t num = bytes;
3482b9bc
EB
2978
2979 if (head) {
2980 /* Make small requests to get to alignment boundaries. */
f5a5ca79 2981 num = MIN(bytes, align - head);
3482b9bc
EB
2982 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
2983 num %= bs->bl.request_alignment;
2984 }
2985 head = (head + num) % align;
2986 assert(num < max_pdiscard);
2987 } else if (tail) {
2988 if (num > align) {
2989 /* Shorten the request to the last aligned cluster. */
2990 num -= tail;
2991 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
2992 tail > bs->bl.request_alignment) {
2993 tail %= bs->bl.request_alignment;
2994 num -= tail;
2995 }
2996 }
2997 /* limit request size */
2998 if (num > max_pdiscard) {
2999 num = max_pdiscard;
3000 }
61007b31 3001
d470ad42
HR
3002 if (!bs->drv) {
3003 ret = -ENOMEDIUM;
3004 goto out;
3005 }
47a5486d
EB
3006 if (bs->drv->bdrv_co_pdiscard) {
3007 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
61007b31
SH
3008 } else {
3009 BlockAIOCB *acb;
3010 CoroutineIOCompletion co = {
3011 .coroutine = qemu_coroutine_self(),
3012 };
3013
4da444a0
EB
3014 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
3015 bdrv_co_io_em_complete, &co);
61007b31 3016 if (acb == NULL) {
b1066c87
FZ
3017 ret = -EIO;
3018 goto out;
61007b31
SH
3019 } else {
3020 qemu_coroutine_yield();
3021 ret = co.ret;
3022 }
3023 }
3024 if (ret && ret != -ENOTSUP) {
b1066c87 3025 goto out;
61007b31
SH
3026 }
3027
9f1963b3 3028 offset += num;
f5a5ca79 3029 bytes -= num;
61007b31 3030 }
b1066c87
FZ
3031 ret = 0;
3032out:
00695c27 3033 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
b1066c87 3034 tracked_request_end(&req);
99723548 3035 bdrv_dec_in_flight(bs);
b1066c87 3036 return ret;
61007b31
SH
3037}
3038
d93e5726 3039int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes)
61007b31
SH
3040{
3041 Coroutine *co;
3042 DiscardCo rwco = {
0b9fd3f4 3043 .child = child,
0c51a893 3044 .offset = offset,
f5a5ca79 3045 .bytes = bytes,
61007b31
SH
3046 .ret = NOT_DONE,
3047 };
3048
3049 if (qemu_in_coroutine()) {
3050 /* Fast-path if already in coroutine context */
0c51a893 3051 bdrv_pdiscard_co_entry(&rwco);
61007b31 3052 } else {
0c51a893 3053 co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
0b9fd3f4
FZ
3054 bdrv_coroutine_enter(child->bs, co);
3055 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
61007b31
SH
3056 }
3057
3058 return rwco.ret;
3059}
3060
48af776a 3061int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
61007b31
SH
3062{
3063 BlockDriver *drv = bs->drv;
5c5ae76a
FZ
3064 CoroutineIOCompletion co = {
3065 .coroutine = qemu_coroutine_self(),
3066 };
3067 BlockAIOCB *acb;
61007b31 3068
99723548 3069 bdrv_inc_in_flight(bs);
16a389dc 3070 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
5c5ae76a
FZ
3071 co.ret = -ENOTSUP;
3072 goto out;
3073 }
3074
16a389dc
KW
3075 if (drv->bdrv_co_ioctl) {
3076 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
3077 } else {
3078 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
3079 if (!acb) {
3080 co.ret = -ENOTSUP;
3081 goto out;
3082 }
3083 qemu_coroutine_yield();
5c5ae76a 3084 }
5c5ae76a 3085out:
99723548 3086 bdrv_dec_in_flight(bs);
5c5ae76a
FZ
3087 return co.ret;
3088}
3089
61007b31
SH
3090void *qemu_blockalign(BlockDriverState *bs, size_t size)
3091{
3092 return qemu_memalign(bdrv_opt_mem_align(bs), size);
3093}
3094
3095void *qemu_blockalign0(BlockDriverState *bs, size_t size)
3096{
3097 return memset(qemu_blockalign(bs, size), 0, size);
3098}
3099
3100void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
3101{
3102 size_t align = bdrv_opt_mem_align(bs);
3103
3104 /* Ensure that NULL is never returned on success */
3105 assert(align > 0);
3106 if (size == 0) {
3107 size = align;
3108 }
3109
3110 return qemu_try_memalign(align, size);
3111}
3112
3113void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
3114{
3115 void *mem = qemu_try_blockalign(bs, size);
3116
3117 if (mem) {
3118 memset(mem, 0, size);
3119 }
3120
3121 return mem;
3122}
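/*
 * Usage sketch (editor-added, hypothetical; not io.c code): the typical
 * bounce-buffer pattern built from the helpers above -- allocate with the
 * node's memory alignment, wrap in a single-entry QEMUIOVector, read, and
 * release with qemu_vfree().
 */
static int coroutine_fn example_aligned_read(BdrvChild *child,
                                             int64_t offset, size_t len)
{
    QEMUIOVector qiov;
    void *buf = qemu_try_blockalign(child->bs, len);
    int ret;

    if (buf == NULL) {
        return -ENOMEM;
    }
    qemu_iovec_init_buf(&qiov, buf, len);
    ret = bdrv_co_preadv(child, offset, len, &qiov, 0);
    qemu_vfree(buf);
    return ret;
}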
3123
3124/*
3125 * Check if all memory in this vector is sector aligned.
3126 */
3127bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
3128{
3129 int i;
4196d2f0 3130 size_t alignment = bdrv_min_mem_align(bs);
61007b31
SH
3131
3132 for (i = 0; i < qiov->niov; i++) {
3133 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
3134 return false;
3135 }
3136 if (qiov->iov[i].iov_len % alignment) {
3137 return false;
3138 }
3139 }
3140
3141 return true;
3142}
3143
3144void bdrv_add_before_write_notifier(BlockDriverState *bs,
3145 NotifierWithReturn *notifier)
3146{
3147 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
3148}
3149
3150void bdrv_io_plug(BlockDriverState *bs)
3151{
6b98bd64
PB
3152 BdrvChild *child;
3153
3154 QLIST_FOREACH(child, &bs->children, next) {
3155 bdrv_io_plug(child->bs);
3156 }
3157
850d54a2 3158 if (atomic_fetch_inc(&bs->io_plugged) == 0) {
6b98bd64
PB
3159 BlockDriver *drv = bs->drv;
3160 if (drv && drv->bdrv_io_plug) {
3161 drv->bdrv_io_plug(bs);
3162 }
61007b31
SH
3163 }
3164}
3165
3166void bdrv_io_unplug(BlockDriverState *bs)
3167{
6b98bd64
PB
3168 BdrvChild *child;
3169
3170 assert(bs->io_plugged);
850d54a2 3171 if (atomic_fetch_dec(&bs->io_plugged) == 1) {
6b98bd64
PB
3172 BlockDriver *drv = bs->drv;
3173 if (drv && drv->bdrv_io_unplug) {
3174 drv->bdrv_io_unplug(bs);
3175 }
3176 }
3177
3178 QLIST_FOREACH(child, &bs->children, next) {
3179 bdrv_io_unplug(child->bs);
61007b31
SH
3180 }
3181}
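/*
 * Usage sketch (editor-added, hypothetical; not io.c code): batch several
 * requests between plug/unplug so drivers that implement bdrv_io_plug (e.g.
 * linux-aio backends) can submit them in one go. Calls may nest thanks to the
 * io_plugged counter above; only the outermost pair reaches the driver.
 */
static void example_submit_batch(BlockDriverState *bs)
{
    bdrv_io_plug(bs);
    /* ... queue several bdrv_aio_*()/bdrv_co_*() requests here ... */
    bdrv_io_unplug(bs);
}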
23d0ba93
FZ
3182
3183void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
3184{
3185 BdrvChild *child;
3186
3187 if (bs->drv && bs->drv->bdrv_register_buf) {
3188 bs->drv->bdrv_register_buf(bs, host, size);
3189 }
3190 QLIST_FOREACH(child, &bs->children, next) {
3191 bdrv_register_buf(child->bs, host, size);
3192 }
3193}
3194
3195void bdrv_unregister_buf(BlockDriverState *bs, void *host)
3196{
3197 BdrvChild *child;
3198
3199 if (bs->drv && bs->drv->bdrv_unregister_buf) {
3200 bs->drv->bdrv_unregister_buf(bs, host);
3201 }
3202 QLIST_FOREACH(child, &bs->children, next) {
3203 bdrv_unregister_buf(child->bs, host);
3204 }
3205}
fcc67678 3206
67b51fb9
VSO
3207static int coroutine_fn bdrv_co_copy_range_internal(
3208 BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
3209 uint64_t dst_offset, uint64_t bytes,
3210 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3211 bool recurse_src)
fcc67678 3212{
999658a0 3213 BdrvTrackedRequest req;
fcc67678
FZ
3214 int ret;
3215
fe0480d6
KW
3216 /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3217 assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3218 assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
3219
d4d3e5a0 3220 if (!dst || !dst->bs) {
fcc67678
FZ
3221 return -ENOMEDIUM;
3222 }
fcc67678
FZ
3223 ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
3224 if (ret) {
3225 return ret;
3226 }
67b51fb9
VSO
3227 if (write_flags & BDRV_REQ_ZERO_WRITE) {
3228 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
fcc67678
FZ
3229 }
3230
d4d3e5a0
FZ
3231 if (!src || !src->bs) {
3232 return -ENOMEDIUM;
3233 }
3234 ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
3235 if (ret) {
3236 return ret;
3237 }
3238
fcc67678
FZ
3239 if (!src->bs->drv->bdrv_co_copy_range_from
3240 || !dst->bs->drv->bdrv_co_copy_range_to
3241 || src->bs->encrypted || dst->bs->encrypted) {
3242 return -ENOTSUP;
3243 }
37aec7d7 3244
fcc67678 3245 if (recurse_src) {
999658a0
VSO
3246 bdrv_inc_in_flight(src->bs);
3247 tracked_request_begin(&req, src->bs, src_offset, bytes,
3248 BDRV_TRACKED_READ);
3249
09d2f948
VSO
 3250 /* BDRV_REQ_SERIALISING is only for write operations */
3251 assert(!(read_flags & BDRV_REQ_SERIALISING));
c53cb427 3252 bdrv_wait_serialising_requests(&req);
999658a0 3253
37aec7d7
FZ
3254 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3255 src, src_offset,
3256 dst, dst_offset,
67b51fb9
VSO
3257 bytes,
3258 read_flags, write_flags);
999658a0
VSO
3259
3260 tracked_request_end(&req);
3261 bdrv_dec_in_flight(src->bs);
fcc67678 3262 } else {
999658a0
VSO
3263 bdrv_inc_in_flight(dst->bs);
3264 tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3265 BDRV_TRACKED_WRITE);
0eb1e891
FZ
3266 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3267 write_flags);
3268 if (!ret) {
3269 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3270 src, src_offset,
3271 dst, dst_offset,
3272 bytes,
3273 read_flags, write_flags);
3274 }
3275 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
999658a0
VSO
3276 tracked_request_end(&req);
3277 bdrv_dec_in_flight(dst->bs);
fcc67678 3278 }
999658a0 3279
37aec7d7 3280 return ret;
fcc67678
FZ
3281}
3282
3283/* Copy range from @src to @dst.
3284 *
3285 * See the comment of bdrv_co_copy_range for the parameter and return value
3286 * semantics. */
3287int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
3288 BdrvChild *dst, uint64_t dst_offset,
67b51fb9
VSO
3289 uint64_t bytes,
3290 BdrvRequestFlags read_flags,
3291 BdrvRequestFlags write_flags)
fcc67678 3292{
ecc983a5
FZ
3293 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3294 read_flags, write_flags);
fcc67678 3295 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
67b51fb9 3296 bytes, read_flags, write_flags, true);
fcc67678
FZ
3297}
3298
3299/* Copy range from @src to @dst.
3300 *
3301 * See the comment of bdrv_co_copy_range for the parameter and return value
3302 * semantics. */
3303int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
3304 BdrvChild *dst, uint64_t dst_offset,
67b51fb9
VSO
3305 uint64_t bytes,
3306 BdrvRequestFlags read_flags,
3307 BdrvRequestFlags write_flags)
fcc67678 3308{
ecc983a5
FZ
3309 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3310 read_flags, write_flags);
fcc67678 3311 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
67b51fb9 3312 bytes, read_flags, write_flags, false);
fcc67678
FZ
3313}
3314
3315int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
3316 BdrvChild *dst, uint64_t dst_offset,
67b51fb9
VSO
3317 uint64_t bytes, BdrvRequestFlags read_flags,
3318 BdrvRequestFlags write_flags)
fcc67678 3319{
37aec7d7
FZ
3320 return bdrv_co_copy_range_from(src, src_offset,
3321 dst, dst_offset,
67b51fb9 3322 bytes, read_flags, write_flags);
fcc67678 3323}
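/*
 * Usage sketch (editor-added, hypothetical; not io.c code): offload a copy of
 * the first nbytes between two children. Per the checks in
 * bdrv_co_copy_range_internal() above, both drivers must implement the
 * copy-range hooks; otherwise -ENOTSUP is returned and the caller has to fall
 * back to an ordinary read+write loop.
 */
static int coroutine_fn example_clone_prefix(BdrvChild *src, BdrvChild *dst,
                                             uint64_t nbytes)
{
    return bdrv_co_copy_range(src, 0, dst, 0, nbytes, 0, 0);
}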
3d9f2d2a
KW
3324
3325static void bdrv_parent_cb_resize(BlockDriverState *bs)
3326{
3327 BdrvChild *c;
3328 QLIST_FOREACH(c, &bs->parents, next_parent) {
bd86fb99
HR
3329 if (c->klass->resize) {
3330 c->klass->resize(c);
3d9f2d2a
KW
3331 }
3332 }
3333}
3334
3335/**
3336 * Truncate file to 'offset' bytes (needed only for file protocols)
c80d8b06
HR
3337 *
3338 * If 'exact' is true, the file must be resized to exactly the given
3339 * 'offset'. Otherwise, it is sufficient for the node to be at least
3340 * 'offset' bytes in length.
3d9f2d2a 3341 */
c80d8b06 3342int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
7b8e4857
KW
3343 PreallocMode prealloc, BdrvRequestFlags flags,
3344 Error **errp)
3d9f2d2a
KW
3345{
3346 BlockDriverState *bs = child->bs;
3347 BlockDriver *drv = bs->drv;
1bc5f09f
KW
3348 BdrvTrackedRequest req;
3349 int64_t old_size, new_bytes;
3d9f2d2a
KW
3350 int ret;
3351
3d9f2d2a
KW
3352
3353 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3354 if (!drv) {
3355 error_setg(errp, "No medium inserted");
3356 return -ENOMEDIUM;
3357 }
3358 if (offset < 0) {
3359 error_setg(errp, "Image size cannot be negative");
3360 return -EINVAL;
3361 }
3362
1bc5f09f
KW
3363 old_size = bdrv_getlength(bs);
3364 if (old_size < 0) {
3365 error_setg_errno(errp, -old_size, "Failed to get old image size");
3366 return old_size;
3367 }
3368
3369 if (offset > old_size) {
3370 new_bytes = offset - old_size;
3371 } else {
3372 new_bytes = 0;
3373 }
3374
3d9f2d2a 3375 bdrv_inc_in_flight(bs);
5416a11e
FZ
3376 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3377 BDRV_TRACKED_TRUNCATE);
1bc5f09f
KW
3378
3379 /* If we are growing the image and potentially using preallocation for the
3380 * new area, we need to make sure that no write requests are made to it
3381 * concurrently or they might be overwritten by preallocation. */
3382 if (new_bytes) {
304d9d7f 3383 bdrv_mark_request_serialising(&req, 1);
cd47d792
FZ
3384 }
3385 if (bs->read_only) {
3386 error_setg(errp, "Image is read-only");
3387 ret = -EACCES;
3388 goto out;
3389 }
3390 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3391 0);
3392 if (ret < 0) {
3393 error_setg_errno(errp, -ret,
3394 "Failed to prepare request for truncation");
3395 goto out;
1bc5f09f 3396 }
3d9f2d2a 3397
955c7d66
KW
3398 /*
3399 * If the image has a backing file that is large enough that it would
3400 * provide data for the new area, we cannot leave it unallocated because
3401 * then the backing file content would become visible. Instead, zero-fill
3402 * the new area.
3403 *
3404 * Note that if the image has a backing file, but was opened without the
3405 * backing file, taking care of keeping things consistent with that backing
3406 * file is the user's responsibility.
3407 */
3408 if (new_bytes && bs->backing) {
3409 int64_t backing_len;
3410
3411 backing_len = bdrv_getlength(backing_bs(bs));
3412 if (backing_len < 0) {
3413 ret = backing_len;
3414 error_setg_errno(errp, -ret, "Could not get backing file size");
3415 goto out;
3416 }
3417
3418 if (backing_len > old_size) {
3419 flags |= BDRV_REQ_ZERO_WRITE;
3420 }
3421 }
3422
6b7e8f8b 3423 if (drv->bdrv_co_truncate) {
92b92799
KW
3424 if (flags & ~bs->supported_truncate_flags) {
3425 error_setg(errp, "Block driver does not support requested flags");
3426 ret = -ENOTSUP;
3427 goto out;
3428 }
3429 ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
6b7e8f8b 3430 } else if (bs->file && drv->is_filter) {
7b8e4857 3431 ret = bdrv_co_truncate(bs->file, offset, exact, prealloc, flags, errp);
6b7e8f8b 3432 } else {
3d9f2d2a
KW
3433 error_setg(errp, "Image format driver does not support resize");
3434 ret = -ENOTSUP;
3435 goto out;
3436 }
3d9f2d2a
KW
3437 if (ret < 0) {
3438 goto out;
3439 }
6b7e8f8b 3440
3d9f2d2a
KW
3441 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3442 if (ret < 0) {
3443 error_setg_errno(errp, -ret, "Could not refresh total sector count");
3444 } else {
3445 offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3446 }
cd47d792
FZ
3447 /* It's possible that truncation succeeded but refresh_total_sectors
3448 * failed, but the latter doesn't affect how we should finish the request.
3449 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3450 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3d9f2d2a
KW
3451
3452out:
1bc5f09f 3453 tracked_request_end(&req);
3d9f2d2a 3454 bdrv_dec_in_flight(bs);
1bc5f09f 3455
3d9f2d2a
KW
3456 return ret;
3457}
3458
3459typedef struct TruncateCo {
3460 BdrvChild *child;
3461 int64_t offset;
c80d8b06 3462 bool exact;
3d9f2d2a 3463 PreallocMode prealloc;
7b8e4857 3464 BdrvRequestFlags flags;
3d9f2d2a
KW
3465 Error **errp;
3466 int ret;
3467} TruncateCo;
3468
3469static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
3470{
3471 TruncateCo *tco = opaque;
c80d8b06 3472 tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->exact,
7b8e4857 3473 tco->prealloc, tco->flags, tco->errp);
4720cbee 3474 aio_wait_kick();
3d9f2d2a
KW
3475}
3476
c80d8b06 3477int bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
7b8e4857 3478 PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
3d9f2d2a
KW
3479{
3480 Coroutine *co;
3481 TruncateCo tco = {
3482 .child = child,
3483 .offset = offset,
c80d8b06 3484 .exact = exact,
3d9f2d2a 3485 .prealloc = prealloc,
7b8e4857 3486 .flags = flags,
3d9f2d2a
KW
3487 .errp = errp,
3488 .ret = NOT_DONE,
3489 };
3490
3491 if (qemu_in_coroutine()) {
3492 /* Fast-path if already in coroutine context */
3493 bdrv_truncate_co_entry(&tco);
3494 } else {
3495 co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
4720cbee 3496 bdrv_coroutine_enter(child->bs, co);
3d9f2d2a
KW
3497 BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
3498 }
3499
3500 return tco.ret;
3501}
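/*
 * Usage sketch (editor-added, hypothetical; not io.c code): grow an image to
 * exactly 10 GiB with no preallocation, assuming the QAPI PreallocMode
 * constant PREALLOC_MODE_OFF. 'exact' = true forces the final size; with
 * 'exact' = false the node merely has to end up at least this large.
 */
static int example_grow_to_10g(BdrvChild *child, Error **errp)
{
    return bdrv_truncate(child, 10LL * 1024 * 1024 * 1024, true,
                         PREALLOC_MODE_OFF, 0, errp);
}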