block/io.c
1/*
2 * Block layer I/O functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
80c71a24 25#include "qemu/osdep.h"
61007b31 26#include "trace.h"
7f0e9da6 27#include "sysemu/block-backend.h"
7719f3c9 28#include "block/aio-wait.h"
61007b31 29#include "block/blockjob.h"
f321dcb5 30#include "block/blockjob_int.h"
61007b31 31#include "block/block_int.h"
f348b6d1 32#include "qemu/cutils.h"
da34e65c 33#include "qapi/error.h"
d49b6836 34#include "qemu/error-report.h"
35
36#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
37
38/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
39#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
40
41static AioWait drain_all_aio_wait;
42
d05aa8bb 43static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
f5a5ca79 44 int64_t offset, int bytes, BdrvRequestFlags flags);
61007b31 45
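/* Begin a drained section for all parents of @bs except @ignore; if
 * @ignore_bds_parents is true, parents that are block nodes themselves
 * are skipped as well. */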
46void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
47 bool ignore_bds_parents)
61007b31 48{
02d21300 49 BdrvChild *c, *next;
27ccdd52 50
02d21300 51 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
6cd5c9d7 52 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
53 continue;
54 }
4be6a6d1 55 bdrv_parent_drained_begin_single(c, false);
56 }
57}
61007b31 58
59void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
60 bool ignore_bds_parents)
ce0f1412 61{
02d21300 62 BdrvChild *c, *next;
27ccdd52 63
02d21300 64 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
6cd5c9d7 65 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
66 continue;
67 }
68 if (c->role->drained_end) {
69 c->role->drained_end(c);
70 }
27ccdd52 71 }
72}
73
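/* Returns true if parent @c still has activity that the drain has to
 * wait for, as reported by its .drained_poll callback. */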
74static bool bdrv_parent_drained_poll_single(BdrvChild *c)
75{
76 if (c->role->drained_poll) {
77 return c->role->drained_poll(c);
78 }
79 return false;
80}
81
82static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
83 bool ignore_bds_parents)
84{
85 BdrvChild *c, *next;
86 bool busy = false;
87
88 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
6cd5c9d7 89 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
90 continue;
91 }
4be6a6d1 92 busy |= bdrv_parent_drained_poll_single(c);
93 }
94
95 return busy;
96}
97
98void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
99{
100 if (c->role->drained_begin) {
101 c->role->drained_begin(c);
102 }
103 if (poll) {
104 BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
105 }
106}
107
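/* Merge a child's BlockLimits into @dst: alignments and optimal sizes use
 * the larger value, maximum sizes use the smaller non-zero value. */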
108static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
109{
110 dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
111 dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
112 dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
113 src->opt_mem_alignment);
114 dst->min_mem_alignment = MAX(dst->min_mem_alignment,
115 src->min_mem_alignment);
116 dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
117}
118
119void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
120{
121 BlockDriver *drv = bs->drv;
122 Error *local_err = NULL;
123
124 memset(&bs->bl, 0, sizeof(bs->bl));
125
126 if (!drv) {
127 return;
128 }
129
79ba8c98 130 /* Default alignment based on whether driver has byte interface */
131 bs->bl.request_alignment = (drv->bdrv_co_preadv ||
132 drv->bdrv_aio_preadv) ? 1 : 512;
79ba8c98 133
134 /* Take some limits from the children as a default */
135 if (bs->file) {
9a4f4c31 136 bdrv_refresh_limits(bs->file->bs, &local_err);
137 if (local_err) {
138 error_propagate(errp, local_err);
139 return;
140 }
d9e0dfa2 141 bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
61007b31 142 } else {
4196d2f0 143 bs->bl.min_mem_alignment = 512;
459b4e66 144 bs->bl.opt_mem_alignment = getpagesize();
145
146 /* Safe default since most protocols use readv()/writev()/etc */
147 bs->bl.max_iov = IOV_MAX;
148 }
149
150 if (bs->backing) {
151 bdrv_refresh_limits(bs->backing->bs, &local_err);
152 if (local_err) {
153 error_propagate(errp, local_err);
154 return;
155 }
d9e0dfa2 156 bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
157 }
158
159 /* Then let the driver override it */
160 if (drv->bdrv_refresh_limits) {
161 drv->bdrv_refresh_limits(bs, errp);
162 }
163}
164
165/**
166 * The copy-on-read flag is actually a reference count so multiple users may
167 * use the feature without worrying about clobbering its previous state.
168 * Copy-on-read stays enabled until all users have called to disable it.
169 */
170void bdrv_enable_copy_on_read(BlockDriverState *bs)
171{
d3faa13e 172 atomic_inc(&bs->copy_on_read);
173}
174
175void bdrv_disable_copy_on_read(BlockDriverState *bs)
176{
177 int old = atomic_fetch_dec(&bs->copy_on_read);
178 assert(old >= 1);
179}
180
181typedef struct {
182 Coroutine *co;
183 BlockDriverState *bs;
184 bool done;
481cad48 185 bool begin;
b0165585 186 bool recursive;
fe4f0614 187 bool poll;
0152bf40 188 BdrvChild *parent;
6cd5c9d7 189 bool ignore_bds_parents;
190} BdrvCoDrainData;
191
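/* Coroutine entry point that calls the driver's .bdrv_co_drain_begin or
 * .bdrv_co_drain_end callback and then signals completion. */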
192static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
193{
194 BdrvCoDrainData *data = opaque;
195 BlockDriverState *bs = data->bs;
196
481cad48 197 if (data->begin) {
f8ea8dac 198 bs->drv->bdrv_co_drain_begin(bs);
199 } else {
200 bs->drv->bdrv_co_drain_end(bs);
201 }
202
203 /* Set data->done before reading bs->wakeup. */
204 atomic_mb_set(&data->done, true);
205 bdrv_dec_in_flight(bs);
206
207 if (data->begin) {
208 g_free(data);
209 }
210}
211
db0289b9 212/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
7d40d9ef 213static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
61124f03 214{
0109e7e6 215 BdrvCoDrainData *data;
61124f03 216
f8ea8dac 217 if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
481cad48 218 (!begin && !bs->drv->bdrv_co_drain_end)) {
219 return;
220 }
221
222 data = g_new(BdrvCoDrainData, 1);
223 *data = (BdrvCoDrainData) {
224 .bs = bs,
225 .done = false,
226 .begin = begin
227 };
228
229 /* Make sure the driver callback completes during the polling phase for
230 * drain_begin. */
231 bdrv_inc_in_flight(bs);
232 data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
233 aio_co_schedule(bdrv_get_aio_context(bs), data->co);
234
235 if (!begin) {
236 BDRV_POLL_WHILE(bs, !data->done);
237 g_free(data);
238 }
239}
240
1cc8e54a 241/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
fe4f0614 242bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
6cd5c9d7 243 BdrvChild *ignore_parent, bool ignore_bds_parents)
89bd0305 244{
245 BdrvChild *child, *next;
246
6cd5c9d7 247 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
248 return true;
249 }
250
251 if (atomic_read(&bs->in_flight)) {
252 return true;
253 }
254
255 if (recursive) {
6cd5c9d7 256 assert(!ignore_bds_parents);
fe4f0614 257 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
6cd5c9d7 258 if (bdrv_drain_poll(child->bs, recursive, child, false)) {
259 return true;
260 }
261 }
262 }
263
264 return false;
265}
266
fe4f0614 267static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
89bd0305 268 BdrvChild *ignore_parent)
269{
270 /* Execute pending BHs first and check everything else only after the BHs
271 * have executed. */
272 while (aio_poll(bs->aio_context, false));
89bd0305 273
6cd5c9d7 274 return bdrv_drain_poll(bs, recursive, ignore_parent, false);
275}
276
b0165585 277static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
278 BdrvChild *parent, bool ignore_bds_parents,
279 bool poll);
b0165585 280static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
6cd5c9d7 281 BdrvChild *parent, bool ignore_bds_parents);
0152bf40 282
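/* Bottom half scheduled by bdrv_co_yield_to_drain(): performs the actual
 * drained_begin/end outside of coroutine context and then wakes the
 * waiting coroutine. */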
283static void bdrv_co_drain_bh_cb(void *opaque)
284{
285 BdrvCoDrainData *data = opaque;
286 Coroutine *co = data->co;
99723548 287 BlockDriverState *bs = data->bs;
a77fd4bb 288
289 if (bs) {
290 bdrv_dec_in_flight(bs);
291 if (data->begin) {
292 bdrv_do_drained_begin(bs, data->recursive, data->parent,
293 data->ignore_bds_parents, data->poll);
c8ca33d0 294 } else {
295 bdrv_do_drained_end(bs, data->recursive, data->parent,
296 data->ignore_bds_parents);
c8ca33d0 297 }
481cad48 298 } else {
299 assert(data->begin);
300 bdrv_drain_all_begin();
301 }
302
a77fd4bb 303 data->done = true;
1919631e 304 aio_co_wake(co);
305}
306
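/* Called from coroutine context: schedule bdrv_co_drain_bh_cb() in the
 * node's AioContext and yield until the drain has been carried out. */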
481cad48 307static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
b0165585 308 bool begin, bool recursive,
309 BdrvChild *parent,
310 bool ignore_bds_parents,
311 bool poll)
312{
313 BdrvCoDrainData data;
314
315 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
c40a2545 316 * other coroutines run if they were queued by aio_co_enter(). */
317
318 assert(qemu_in_coroutine());
319 data = (BdrvCoDrainData) {
320 .co = qemu_coroutine_self(),
321 .bs = bs,
322 .done = false,
481cad48 323 .begin = begin,
b0165585 324 .recursive = recursive,
0152bf40 325 .parent = parent,
6cd5c9d7 326 .ignore_bds_parents = ignore_bds_parents,
fe4f0614 327 .poll = poll,
a77fd4bb 328 };
329 if (bs) {
330 bdrv_inc_in_flight(bs);
331 }
332 aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
333 bdrv_co_drain_bh_cb, &data);
334
335 qemu_coroutine_yield();
336 /* If we are resumed from some other event (such as an aio completion or a
337 * timer callback), it is a bug in the caller that should be fixed. */
338 assert(data.done);
339}
340
dcf94a23 341void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
6cd5c9d7 342 BdrvChild *parent, bool ignore_bds_parents)
6820643f 343{
dcf94a23 344 assert(!qemu_in_coroutine());
d42cf288 345
60369b86 346 /* Stop things in parent-to-child order */
414c2ec3 347 if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
6820643f 348 aio_disable_external(bdrv_get_aio_context(bs));
349 }
350
6cd5c9d7 351 bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
7d40d9ef 352 bdrv_drain_invoke(bs, true);
353}
354
355static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
356 BdrvChild *parent, bool ignore_bds_parents,
357 bool poll)
358{
359 BdrvChild *child, *next;
360
361 if (qemu_in_coroutine()) {
362 bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
363 poll);
364 return;
365 }
366
6cd5c9d7 367 bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);
d30b8e64 368
b0165585 369 if (recursive) {
6cd5c9d7 370 assert(!ignore_bds_parents);
d736f119 371 bs->recursive_quiesce_counter++;
b0165585 372 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
373 bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
374 false);
375 }
376 }
377
378 /*
379 * Wait for drained requests to finish.
380 *
381 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
382 * call is needed so things in this AioContext can make progress even
383 * though we don't return to the main AioContext loop - this automatically
384 * includes other nodes in the same AioContext and therefore all child
385 * nodes.
386 */
387 if (poll) {
6cd5c9d7 388 assert(!ignore_bds_parents);
389 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
390 }
391}
392
393void bdrv_drained_begin(BlockDriverState *bs)
394{
6cd5c9d7 395 bdrv_do_drained_begin(bs, false, NULL, false, true);
396}
397
398void bdrv_subtree_drained_begin(BlockDriverState *bs)
399{
6cd5c9d7 400 bdrv_do_drained_begin(bs, true, NULL, false, true);
401}
402
403static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
404 BdrvChild *parent, bool ignore_bds_parents)
6820643f 405{
b0165585 406 BdrvChild *child, *next;
407 int old_quiesce_counter;
408
481cad48 409 if (qemu_in_coroutine()) {
410 bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
411 false);
412 return;
413 }
6820643f 414 assert(bs->quiesce_counter > 0);
0f115168 415 old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
6820643f 416
60369b86 417 /* Re-enable things in child-to-parent order */
7d40d9ef 418 bdrv_drain_invoke(bs, false);
6cd5c9d7 419 bdrv_parent_drained_end(bs, parent, ignore_bds_parents);
0f115168 420 if (old_quiesce_counter == 1) {
421 aio_enable_external(bdrv_get_aio_context(bs));
422 }
423
424 if (recursive) {
6cd5c9d7 425 assert(!ignore_bds_parents);
d736f119 426 bs->recursive_quiesce_counter--;
b0165585 427 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
6cd5c9d7 428 bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents);
429 }
430 }
431}
432
433void bdrv_drained_end(BlockDriverState *bs)
434{
6cd5c9d7 435 bdrv_do_drained_end(bs, false, NULL, false);
436}
437
438void bdrv_subtree_drained_end(BlockDriverState *bs)
439{
6cd5c9d7 440 bdrv_do_drained_end(bs, true, NULL, false);
441}
442
443void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
444{
445 int i;
446
447 for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
6cd5c9d7 448 bdrv_do_drained_begin(child->bs, true, child, false, true);
449 }
450}
451
452void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
453{
454 int i;
455
456 for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
6cd5c9d7 457 bdrv_do_drained_end(child->bs, true, child, false);
458 }
459}
460
61007b31 461/*
462 * Wait for pending requests to complete on a single BlockDriverState subtree,
463 * and suspend block driver's internal I/O until next request arrives.
61007b31 464 *
465 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
466 * AioContext.
467 */
b6e84c97 468void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
61007b31 469{
470 assert(qemu_in_coroutine());
471 bdrv_drained_begin(bs);
472 bdrv_drained_end(bs);
b6e84c97 473}
f406c03c 474
475void bdrv_drain(BlockDriverState *bs)
476{
477 bdrv_drained_begin(bs);
478 bdrv_drained_end(bs);
479}
480
481static void bdrv_drain_assert_idle(BlockDriverState *bs)
482{
483 BdrvChild *child, *next;
484
485 assert(atomic_read(&bs->in_flight) == 0);
486 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
487 bdrv_drain_assert_idle(child->bs);
488 }
489}
490
491unsigned int bdrv_drain_all_count = 0;
492
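/* Returns true if any node in the graph still has in-flight requests that
 * bdrv_drain_all_begin() has to wait for. */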
493static bool bdrv_drain_all_poll(void)
494{
495 BlockDriverState *bs = NULL;
496 bool result = false;
497
498 /* Execute pending BHs first (may modify the graph) and check everything
499 * else only after the BHs have executed. */
500 while (aio_poll(qemu_get_aio_context(), false));
501
502 /* bdrv_drain_poll() can't make changes to the graph and we are holding the
503 * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
504 while ((bs = bdrv_next_all_states(bs))) {
505 AioContext *aio_context = bdrv_get_aio_context(bs);
506 aio_context_acquire(aio_context);
507 result |= bdrv_drain_poll(bs, false, NULL, true);
508 aio_context_release(aio_context);
509 }
510
511 return result;
512}
513
514/*
515 * Wait for pending requests to complete across all BlockDriverStates
516 *
517 * This function does not flush data to disk, use bdrv_flush_all() for that
518 * after calling this function.
519 *
520 * This pauses all block jobs and disables external clients. It must
521 * be paired with bdrv_drain_all_end().
522 *
523 * NOTE: no new block jobs or BlockDriverStates can be created between
524 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
61007b31 525 */
c0778f66 526void bdrv_drain_all_begin(void)
61007b31 527{
0f12264e 528 BlockDriverState *bs = NULL;
61007b31 529
c8ca33d0 530 if (qemu_in_coroutine()) {
0f12264e 531 bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true);
532 return;
533 }
534
535 /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
536 * loop AioContext, so make sure we're in the main context. */
9a7e86c8 537 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
538 assert(bdrv_drain_all_count < INT_MAX);
539 bdrv_drain_all_count++;
9a7e86c8 540
541 /* Quiesce all nodes, without polling in-flight requests yet. The graph
542 * cannot change during this loop. */
543 while ((bs = bdrv_next_all_states(bs))) {
544 AioContext *aio_context = bdrv_get_aio_context(bs);
545
546 aio_context_acquire(aio_context);
0f12264e 547 bdrv_do_drained_begin(bs, false, NULL, true, false);
548 aio_context_release(aio_context);
549 }
550
551 /* Now poll the in-flight requests */
552 AIO_WAIT_WHILE(&drain_all_aio_wait, NULL, bdrv_drain_all_poll());
553
554 while ((bs = bdrv_next_all_states(bs))) {
c13ad59f 555 bdrv_drain_assert_idle(bs);
61007b31 556 }
557}
558
559void bdrv_drain_all_end(void)
560{
0f12264e 561 BlockDriverState *bs = NULL;
c0778f66 562
0f12264e 563 while ((bs = bdrv_next_all_states(bs))) {
564 AioContext *aio_context = bdrv_get_aio_context(bs);
565
566 aio_context_acquire(aio_context);
0f12264e 567 bdrv_do_drained_end(bs, false, NULL, true);
568 aio_context_release(aio_context);
569 }
570
571 assert(bdrv_drain_all_count > 0);
572 bdrv_drain_all_count--;
573}
574
575void bdrv_drain_all(void)
576{
577 bdrv_drain_all_begin();
578 bdrv_drain_all_end();
579}
580
581/**
582 * Remove an active request from the tracked requests list
583 *
584 * This function should be called when a tracked request is completing.
585 */
586static void tracked_request_end(BdrvTrackedRequest *req)
587{
588 if (req->serialising) {
20fc71b2 589 atomic_dec(&req->bs->serialising_in_flight);
590 }
591
3783fa3d 592 qemu_co_mutex_lock(&req->bs->reqs_lock);
593 QLIST_REMOVE(req, list);
594 qemu_co_queue_restart_all(&req->wait_queue);
3783fa3d 595 qemu_co_mutex_unlock(&req->bs->reqs_lock);
596}
597
598/**
599 * Add an active request to the tracked requests list
600 */
601static void tracked_request_begin(BdrvTrackedRequest *req,
602 BlockDriverState *bs,
603 int64_t offset,
604 unsigned int bytes,
605 enum BdrvTrackedRequestType type)
606{
607 *req = (BdrvTrackedRequest){
608 .bs = bs,
609 .offset = offset,
610 .bytes = bytes,
ebde595c 611 .type = type,
612 .co = qemu_coroutine_self(),
613 .serialising = false,
614 .overlap_offset = offset,
615 .overlap_bytes = bytes,
616 };
617
618 qemu_co_queue_init(&req->wait_queue);
619
3783fa3d 620 qemu_co_mutex_lock(&bs->reqs_lock);
61007b31 621 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
3783fa3d 622 qemu_co_mutex_unlock(&bs->reqs_lock);
623}
624
625static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
626{
627 int64_t overlap_offset = req->offset & ~(align - 1);
628 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
629 - overlap_offset;
630
631 if (!req->serialising) {
20fc71b2 632 atomic_inc(&req->bs->serialising_in_flight);
633 req->serialising = true;
634 }
635
636 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
637 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
638}
639
640static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req)
641{
642 /*
643 * If the request is serialising, overlap_offset and overlap_bytes are set,
644 * so we can check if the request is aligned. Otherwise, don't care and
645 * return false.
646 */
647
648 return req->serialising && (req->offset == req->overlap_offset) &&
649 (req->bytes == req->overlap_bytes);
650}
651
652/**
653 * Round a region to cluster boundaries
654 */
655void bdrv_round_to_clusters(BlockDriverState *bs,
7cfd5275 656 int64_t offset, int64_t bytes,
244483e6 657 int64_t *cluster_offset,
7cfd5275 658 int64_t *cluster_bytes)
659{
660 BlockDriverInfo bdi;
661
662 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
663 *cluster_offset = offset;
664 *cluster_bytes = bytes;
665 } else {
666 int64_t c = bdi.cluster_size;
667 *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
668 *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
669 }
670}
671
672static int bdrv_get_cluster_size(BlockDriverState *bs)
673{
674 BlockDriverInfo bdi;
675 int ret;
676
677 ret = bdrv_get_info(bs, &bdi);
678 if (ret < 0 || bdi.cluster_size == 0) {
a5b8dd2c 679 return bs->bl.request_alignment;
680 } else {
681 return bdi.cluster_size;
682 }
683}
684
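/* Returns true if [offset, offset + bytes) intersects the overlap range
 * recorded in @req. */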
685static bool tracked_request_overlaps(BdrvTrackedRequest *req,
686 int64_t offset, unsigned int bytes)
687{
688 /* aaaa bbbb */
689 if (offset >= req->overlap_offset + req->overlap_bytes) {
690 return false;
691 }
692 /* bbbb aaaa */
693 if (req->overlap_offset >= offset + bytes) {
694 return false;
695 }
696 return true;
697}
698
699void bdrv_inc_in_flight(BlockDriverState *bs)
700{
701 atomic_inc(&bs->in_flight);
702}
703
704void bdrv_wakeup(BlockDriverState *bs)
705{
7719f3c9 706 aio_wait_kick(bdrv_get_aio_wait(bs));
0f12264e 707 aio_wait_kick(&drain_all_aio_wait);
708}
709
710void bdrv_dec_in_flight(BlockDriverState *bs)
711{
712 atomic_dec(&bs->in_flight);
c9d1a561 713 bdrv_wakeup(bs);
714}
715
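/* Wait for all overlapping serialising requests to complete. Returns true
 * if this coroutine had to wait. */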
716static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
717{
718 BlockDriverState *bs = self->bs;
719 BdrvTrackedRequest *req;
720 bool retry;
721 bool waited = false;
722
20fc71b2 723 if (!atomic_read(&bs->serialising_in_flight)) {
724 return false;
725 }
726
727 do {
728 retry = false;
3783fa3d 729 qemu_co_mutex_lock(&bs->reqs_lock);
61007b31
SH
730 QLIST_FOREACH(req, &bs->tracked_requests, list) {
731 if (req == self || (!req->serialising && !self->serialising)) {
732 continue;
733 }
734 if (tracked_request_overlaps(req, self->overlap_offset,
735 self->overlap_bytes))
736 {
737 /* Hitting this means there was a reentrant request, for
738 * example, a block driver issuing nested requests. This must
739 * never happen since it means deadlock.
740 */
741 assert(qemu_coroutine_self() != req->co);
742
743 /* If the request is already (indirectly) waiting for us, or
744 * will wait for us as soon as it wakes up, then just go on
745 * (instead of producing a deadlock in the former case). */
746 if (!req->waiting_for) {
747 self->waiting_for = req;
3783fa3d 748 qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
749 self->waiting_for = NULL;
750 retry = true;
751 waited = true;
752 break;
753 }
754 }
755 }
3783fa3d 756 qemu_co_mutex_unlock(&bs->reqs_lock);
757 } while (retry);
758
759 return waited;
760}
761
762static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
763 size_t size)
764{
765 if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
766 return -EIO;
767 }
768
769 if (!bdrv_is_inserted(bs)) {
770 return -ENOMEDIUM;
771 }
772
773 if (offset < 0) {
774 return -EIO;
775 }
776
777 return 0;
778}
779
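/* State shared between bdrv_prwv_co() and the bdrv_rw_co_entry() coroutine
 * that carries out the emulated synchronous request. */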
61007b31 780typedef struct RwCo {
e293b7a3 781 BdrvChild *child;
782 int64_t offset;
783 QEMUIOVector *qiov;
784 bool is_write;
785 int ret;
786 BdrvRequestFlags flags;
787} RwCo;
788
789static void coroutine_fn bdrv_rw_co_entry(void *opaque)
790{
791 RwCo *rwco = opaque;
792
793 if (!rwco->is_write) {
a03ef88f 794 rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
795 rwco->qiov->size, rwco->qiov,
796 rwco->flags);
61007b31 797 } else {
a03ef88f 798 rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
799 rwco->qiov->size, rwco->qiov,
800 rwco->flags);
801 }
802}
803
804/*
805 * Process a vectored synchronous request using coroutines
806 */
e293b7a3 807static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
808 QEMUIOVector *qiov, bool is_write,
809 BdrvRequestFlags flags)
810{
811 Coroutine *co;
812 RwCo rwco = {
e293b7a3 813 .child = child,
814 .offset = offset,
815 .qiov = qiov,
816 .is_write = is_write,
817 .ret = NOT_DONE,
818 .flags = flags,
819 };
820
821 if (qemu_in_coroutine()) {
822 /* Fast-path if already in coroutine context */
823 bdrv_rw_co_entry(&rwco);
824 } else {
0b8b8753 825 co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
e92f0e19 826 bdrv_coroutine_enter(child->bs, co);
88b062c2 827 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
828 }
829 return rwco.ret;
830}
831
832/*
833 * Process a synchronous request using coroutines
834 */
e293b7a3 835static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
836 int nb_sectors, bool is_write, BdrvRequestFlags flags)
837{
838 QEMUIOVector qiov;
839 struct iovec iov = {
840 .iov_base = (void *)buf,
841 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
842 };
843
844 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
845 return -EINVAL;
846 }
847
848 qemu_iovec_init_external(&qiov, &iov, 1);
e293b7a3 849 return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
850 &qiov, is_write, flags);
851}
852
853/* return < 0 if error. See bdrv_write() for the return codes */
fbcbbf4e 854int bdrv_read(BdrvChild *child, int64_t sector_num,
855 uint8_t *buf, int nb_sectors)
856{
e293b7a3 857 return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
858}
859
860/* Return < 0 if error. Important errors are:
861 -EIO generic I/O error (may happen for all errors)
862 -ENOMEDIUM No media inserted.
863 -EINVAL Invalid sector number or nb_sectors
864 -EACCES Trying to write a read-only device
865*/
18d51c4b 866int bdrv_write(BdrvChild *child, int64_t sector_num,
867 const uint8_t *buf, int nb_sectors)
868{
e293b7a3 869 return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
870}
871
720ff280 872int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
f5a5ca79 873 int bytes, BdrvRequestFlags flags)
61007b31 874{
875 QEMUIOVector qiov;
876 struct iovec iov = {
877 .iov_base = NULL,
f5a5ca79 878 .iov_len = bytes,
74021bc4
EB
879 };
880
881 qemu_iovec_init_external(&qiov, &iov, 1);
e293b7a3 882 return bdrv_prwv_co(child, offset, &qiov, true,
74021bc4 883 BDRV_REQ_ZERO_WRITE | flags);
884}
885
886/*
74021bc4 887 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
888 * The operation is sped up by checking the block status and only writing
889 * zeroes to the device if they currently do not return zeroes. Optional
74021bc4 890 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
465fe887 891 * BDRV_REQ_FUA).
892 *
893 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
894 */
720ff280 895int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
61007b31 896{
897 int ret;
898 int64_t target_size, bytes, offset = 0;
720ff280 899 BlockDriverState *bs = child->bs;
61007b31 900
901 target_size = bdrv_getlength(bs);
902 if (target_size < 0) {
903 return target_size;
904 }
905
906 for (;;) {
907 bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
908 if (bytes <= 0) {
909 return 0;
910 }
237d78f8 911 ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
61007b31 912 if (ret < 0) {
913 error_report("error getting block status at offset %" PRId64 ": %s",
914 offset, strerror(-ret));
915 return ret;
916 }
917 if (ret & BDRV_BLOCK_ZERO) {
237d78f8 918 offset += bytes;
919 continue;
920 }
237d78f8 921 ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
61007b31 922 if (ret < 0) {
923 error_report("error writing zeroes at offset %" PRId64 ": %s",
924 offset, strerror(-ret));
925 return ret;
926 }
237d78f8 927 offset += bytes;
928 }
929}
930
cf2ab8fc 931int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
932{
933 int ret;
934
e293b7a3 935 ret = bdrv_prwv_co(child, offset, qiov, false, 0);
936 if (ret < 0) {
937 return ret;
938 }
939
940 return qiov->size;
941}
942
cf2ab8fc 943int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
944{
945 QEMUIOVector qiov;
946 struct iovec iov = {
947 .iov_base = (void *)buf,
948 .iov_len = bytes,
949 };
950
951 if (bytes < 0) {
952 return -EINVAL;
953 }
954
955 qemu_iovec_init_external(&qiov, &iov, 1);
cf2ab8fc 956 return bdrv_preadv(child, offset, &qiov);
957}
958
d9ca2ea2 959int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
960{
961 int ret;
962
e293b7a3 963 ret = bdrv_prwv_co(child, offset, qiov, true, 0);
964 if (ret < 0) {
965 return ret;
966 }
967
968 return qiov->size;
969}
970
d9ca2ea2 971int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
972{
973 QEMUIOVector qiov;
974 struct iovec iov = {
975 .iov_base = (void *) buf,
976 .iov_len = bytes,
977 };
978
979 if (bytes < 0) {
980 return -EINVAL;
981 }
982
983 qemu_iovec_init_external(&qiov, &iov, 1);
d9ca2ea2 984 return bdrv_pwritev(child, offset, &qiov);
985}
986
987/*
988 * Writes to the file and ensures that no writes are reordered across this
989 * request (acts as a barrier)
990 *
991 * Returns 0 on success, -errno in error cases.
992 */
993int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
994 const void *buf, int count)
995{
996 int ret;
997
d9ca2ea2 998 ret = bdrv_pwrite(child, offset, buf, count);
999 if (ret < 0) {
1000 return ret;
1001 }
1002
d9ca2ea2 1003 ret = bdrv_flush(child->bs);
1004 if (ret < 0) {
1005 return ret;
1006 }
1007
1008 return 0;
1009}
1010
1011typedef struct CoroutineIOCompletion {
1012 Coroutine *coroutine;
1013 int ret;
1014} CoroutineIOCompletion;
1015
1016static void bdrv_co_io_em_complete(void *opaque, int ret)
1017{
1018 CoroutineIOCompletion *co = opaque;
1019
1020 co->ret = ret;
b9e413dd 1021 aio_co_wake(co->coroutine);
1022}
1023
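/* Hand a read off to the driver: prefer the byte-based .bdrv_co_preadv,
 * then the AIO-based .bdrv_aio_preadv, and finally fall back to the
 * sector-based .bdrv_co_readv interface. */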
1024static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
1025 uint64_t offset, uint64_t bytes,
1026 QEMUIOVector *qiov, int flags)
1027{
1028 BlockDriver *drv = bs->drv;
1029 int64_t sector_num;
1030 unsigned int nb_sectors;
1031
1032 assert(!(flags & ~BDRV_REQ_MASK));
1033
1034 if (!drv) {
1035 return -ENOMEDIUM;
1036 }
1037
1038 if (drv->bdrv_co_preadv) {
1039 return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
1040 }
1041
edfab6a0 1042 if (drv->bdrv_aio_preadv) {
1043 BlockAIOCB *acb;
1044 CoroutineIOCompletion co = {
1045 .coroutine = qemu_coroutine_self(),
1046 };
1047
1048 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
1049 bdrv_co_io_em_complete, &co);
1050 if (acb == NULL) {
1051 return -EIO;
1052 } else {
1053 qemu_coroutine_yield();
1054 return co.ret;
1055 }
1056 }
1057
1058 sector_num = offset >> BDRV_SECTOR_BITS;
1059 nb_sectors = bytes >> BDRV_SECTOR_BITS;
1060
1061 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1062 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1063 assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
1064 assert(drv->bdrv_co_readv);
1065
1066 return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1067}
1068
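/* Hand a write off to the driver, trying the same interfaces in the same
 * order as bdrv_driver_preadv(). Flags the driver does not support
 * (currently BDRV_REQ_FUA) are emulated with a flush after the write. */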
1069static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
1070 uint64_t offset, uint64_t bytes,
1071 QEMUIOVector *qiov, int flags)
1072{
1073 BlockDriver *drv = bs->drv;
1074 int64_t sector_num;
1075 unsigned int nb_sectors;
1076 int ret;
1077
1078 assert(!(flags & ~BDRV_REQ_MASK));
1079
1080 if (!drv) {
1081 return -ENOMEDIUM;
1082 }
1083
3fb06697 1084 if (drv->bdrv_co_pwritev) {
1085 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
1086 flags & bs->supported_write_flags);
1087 flags &= ~bs->supported_write_flags;
1088 goto emulate_flags;
1089 }
1090
edfab6a0 1091 if (drv->bdrv_aio_pwritev) {
1092 BlockAIOCB *acb;
1093 CoroutineIOCompletion co = {
1094 .coroutine = qemu_coroutine_self(),
1095 };
1096
1097 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
1098 flags & bs->supported_write_flags,
1099 bdrv_co_io_em_complete, &co);
1100 flags &= ~bs->supported_write_flags;
08844473 1101 if (acb == NULL) {
3fb06697 1102 ret = -EIO;
1103 } else {
1104 qemu_coroutine_yield();
3fb06697 1105 ret = co.ret;
08844473 1106 }
1107 goto emulate_flags;
1108 }
1109
1110 sector_num = offset >> BDRV_SECTOR_BITS;
1111 nb_sectors = bytes >> BDRV_SECTOR_BITS;
1112
1113 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1114 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1115 assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
1116
1117 assert(drv->bdrv_co_writev);
1118 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
1119 flags & bs->supported_write_flags);
1120 flags &= ~bs->supported_write_flags;
78a07294 1121
3fb06697 1122emulate_flags:
4df863f3 1123 if (ret == 0 && (flags & BDRV_REQ_FUA)) {
1124 ret = bdrv_co_flush(bs);
1125 }
1126
1127 return ret;
1128}
1129
1130static int coroutine_fn
1131bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
1132 uint64_t bytes, QEMUIOVector *qiov)
1133{
1134 BlockDriver *drv = bs->drv;
1135
1136 if (!drv) {
1137 return -ENOMEDIUM;
1138 }
1139
1140 if (!drv->bdrv_co_pwritev_compressed) {
1141 return -ENOTSUP;
1142 }
1143
1144 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
1145}
1146
85c97ca7 1147static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
244483e6 1148 int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
61007b31 1149{
1150 BlockDriverState *bs = child->bs;
1151
1152 /* Perform I/O through a temporary buffer so that users who scribble over
1153 * their read buffer while the operation is in progress do not end up
1154 * modifying the image file. This is critical for zero-copy guest I/O
1155 * where anything might happen inside guest memory.
1156 */
1157 void *bounce_buffer;
1158
1159 BlockDriver *drv = bs->drv;
1160 struct iovec iov;
cb2e2878 1161 QEMUIOVector local_qiov;
244483e6 1162 int64_t cluster_offset;
7cfd5275 1163 int64_t cluster_bytes;
1164 size_t skip_bytes;
1165 int ret;
1166 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1167 BDRV_REQUEST_MAX_BYTES);
1168 unsigned int progress = 0;
61007b31 1169
1170 if (!drv) {
1171 return -ENOMEDIUM;
1172 }
1173
1174 /* FIXME We cannot require callers to have write permissions when all they
1175 * are doing is a read request. If we did things right, write permissions
1176 * would be obtained anyway, but internally by the copy-on-read code. As
765d9df9 1177 * long as it is implemented here rather than in a separate filter driver,
1178 * the copy-on-read code doesn't have its own BdrvChild, however, for which
1179 * it could request permissions. Therefore we have to bypass the permission
1180 * system for the moment. */
1181 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
afa4b293 1182
61007b31 1183 /* Cover entire cluster so no additional backing file I/O is required when
1184 * allocating cluster in the image file. Note that this value may exceed
1185 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1186 * is one reason we loop rather than doing it all at once.
61007b31 1187 */
244483e6 1188 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
cb2e2878 1189 skip_bytes = offset - cluster_offset;
61007b31 1190
1191 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1192 cluster_offset, cluster_bytes);
61007b31 1193
1194 bounce_buffer = qemu_try_blockalign(bs,
1195 MIN(MIN(max_transfer, cluster_bytes),
1196 MAX_BOUNCE_BUFFER));
1197 if (bounce_buffer == NULL) {
1198 ret = -ENOMEM;
1199 goto err;
1200 }
1201
1202 while (cluster_bytes) {
1203 int64_t pnum;
61007b31 1204
1205 ret = bdrv_is_allocated(bs, cluster_offset,
1206 MIN(cluster_bytes, max_transfer), &pnum);
1207 if (ret < 0) {
1208 /* Safe to treat errors in querying allocation as if
1209 * unallocated; we'll probably fail again soon on the
1210 * read, but at least that will set a decent errno.
1211 */
1212 pnum = MIN(cluster_bytes, max_transfer);
1213 }
61007b31 1214
1215 /* Stop at EOF if the image ends in the middle of the cluster */
1216 if (ret == 0 && pnum == 0) {
1217 assert(progress >= bytes);
1218 break;
1219 }
1220
cb2e2878 1221 assert(skip_bytes < pnum);
61007b31 1222
1223 if (ret <= 0) {
1224 /* Must copy-on-read; use the bounce buffer */
1225 iov.iov_base = bounce_buffer;
1226 iov.iov_len = pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
1227 qemu_iovec_init_external(&local_qiov, &iov, 1);
61007b31 1228
1229 ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1230 &local_qiov, 0);
1231 if (ret < 0) {
1232 goto err;
1233 }
1234
1235 bdrv_debug_event(bs, BLKDBG_COR_WRITE);
1236 if (drv->bdrv_co_pwrite_zeroes &&
1237 buffer_is_zero(bounce_buffer, pnum)) {
1238 /* FIXME: Should we (perhaps conditionally) be setting
1239 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1240 * that still correctly reads as zero? */
1241 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
1242 BDRV_REQ_WRITE_UNCHANGED);
1243 } else {
1244 /* This does not change the data on the disk, it is not
1245 * necessary to flush even in cache=writethrough mode.
1246 */
1247 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1248 &local_qiov,
1249 BDRV_REQ_WRITE_UNCHANGED);
1250 }
1251
1252 if (ret < 0) {
1253 /* It might be okay to ignore write errors for guest
1254 * requests. If this is a deliberate copy-on-read
1255 * then we don't want to ignore the error. Simply
1256 * report it in all cases.
1257 */
1258 goto err;
1259 }
1260
1261 qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes,
1262 pnum - skip_bytes);
1263 } else {
1264 /* Read directly into the destination */
1265 qemu_iovec_init(&local_qiov, qiov->niov);
1266 qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes);
1267 ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size,
1268 &local_qiov, 0);
1269 qemu_iovec_destroy(&local_qiov);
1270 if (ret < 0) {
1271 goto err;
1272 }
1273 }
1274
1275 cluster_offset += pnum;
1276 cluster_bytes -= pnum;
1277 progress += pnum - skip_bytes;
1278 skip_bytes = 0;
1279 }
1280 ret = 0;
1281
1282err:
1283 qemu_vfree(bounce_buffer);
1284 return ret;
1285}
1286
1287/*
1288 * Forwards an already correctly aligned request to the BlockDriver. This
1289 * handles copy on read, zeroing after EOF, and fragmentation of large
1290 * reads; any other features must be implemented by the caller.
61007b31 1291 */
85c97ca7 1292static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
1293 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1294 int64_t align, QEMUIOVector *qiov, int flags)
1295{
85c97ca7 1296 BlockDriverState *bs = child->bs;
c9d20029 1297 int64_t total_bytes, max_bytes;
1298 int ret = 0;
1299 uint64_t bytes_remaining = bytes;
1300 int max_transfer;
61007b31 1301
1302 assert(is_power_of_2(align));
1303 assert((offset & (align - 1)) == 0);
1304 assert((bytes & (align - 1)) == 0);
61007b31 1305 assert(!qiov || bytes == qiov->size);
abb06c5a 1306 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1307 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1308 align);
1309
1310 /* TODO: We would need a per-BDS .supported_read_flags and
1311 * potential fallback support, if we ever implement any read flags
1312 * to pass through to drivers. For now, there aren't any
1313 * passthrough flags. */
1314 assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
1315
1316 /* Handle Copy on Read and associated serialisation */
1317 if (flags & BDRV_REQ_COPY_ON_READ) {
1318 /* If we touch the same cluster it counts as an overlap. This
1319 * guarantees that allocating writes will be serialized and not race
1320 * with each other for the same cluster. For example, in copy-on-read
1321 * it ensures that the CoR read and write operations are atomic and
1322 * guest writes cannot interleave between them. */
1323 mark_request_serialising(req, bdrv_get_cluster_size(bs));
1324 }
1325
1326 /* BDRV_REQ_SERIALISING is only for write operation */
1327 assert(!(flags & BDRV_REQ_SERIALISING));
1328
1329 if (!(flags & BDRV_REQ_NO_SERIALISING)) {
1330 wait_serialising_requests(req);
1331 }
1332
1333 if (flags & BDRV_REQ_COPY_ON_READ) {
d6a644bb 1334 int64_t pnum;
61007b31 1335
88e63df2 1336 ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
1337 if (ret < 0) {
1338 goto out;
1339 }
1340
88e63df2 1341 if (!ret || pnum != bytes) {
85c97ca7 1342 ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
1343 goto out;
1344 }
1345 }
1346
1a62d0ac 1347 /* Forward the request to the BlockDriver, possibly fragmenting it */
1348 total_bytes = bdrv_getlength(bs);
1349 if (total_bytes < 0) {
1350 ret = total_bytes;
1351 goto out;
1352 }
61007b31 1353
c9d20029 1354 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1a62d0ac 1355 if (bytes <= max_bytes && bytes <= max_transfer) {
c9d20029 1356 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
1357 goto out;
1358 }
61007b31 1359
1360 while (bytes_remaining) {
1361 int num;
61007b31 1362
1363 if (max_bytes) {
1364 QEMUIOVector local_qiov;
61007b31 1365
1366 num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
1367 assert(num);
1368 qemu_iovec_init(&local_qiov, qiov->niov);
1369 qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
61007b31 1370
1371 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1372 num, &local_qiov, 0);
1373 max_bytes -= num;
1374 qemu_iovec_destroy(&local_qiov);
1375 } else {
1376 num = bytes_remaining;
1377 ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
1378 bytes_remaining);
1379 }
1380 if (ret < 0) {
1381 goto out;
1382 }
1383 bytes_remaining -= num;
1384 }
1385
1386out:
1a62d0ac 1387 return ret < 0 ? ret : 0;
1388}
1389
1390/*
1391 * Handle a read request in coroutine context
1392 */
a03ef88f 1393int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1394 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1395 BdrvRequestFlags flags)
1396{
a03ef88f 1397 BlockDriverState *bs = child->bs;
1398 BlockDriver *drv = bs->drv;
1399 BdrvTrackedRequest req;
1400
a5b8dd2c 1401 uint64_t align = bs->bl.request_alignment;
1402 uint8_t *head_buf = NULL;
1403 uint8_t *tail_buf = NULL;
1404 QEMUIOVector local_qiov;
1405 bool use_local_qiov = false;
1406 int ret;
1407
1408 trace_bdrv_co_preadv(child->bs, offset, bytes, flags);
1409
1410 if (!drv) {
1411 return -ENOMEDIUM;
1412 }
1413
1414 ret = bdrv_check_byte_request(bs, offset, bytes);
1415 if (ret < 0) {
1416 return ret;
1417 }
1418
1419 bdrv_inc_in_flight(bs);
1420
9568b511 1421 /* Don't do copy-on-read if we read data before write operation */
d3faa13e 1422 if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
1423 flags |= BDRV_REQ_COPY_ON_READ;
1424 }
1425
1426 /* Align read if necessary by padding qiov */
1427 if (offset & (align - 1)) {
1428 head_buf = qemu_blockalign(bs, align);
1429 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1430 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1431 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1432 use_local_qiov = true;
1433
1434 bytes += offset & (align - 1);
1435 offset = offset & ~(align - 1);
1436 }
1437
1438 if ((offset + bytes) & (align - 1)) {
1439 if (!use_local_qiov) {
1440 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1441 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1442 use_local_qiov = true;
1443 }
1444 tail_buf = qemu_blockalign(bs, align);
1445 qemu_iovec_add(&local_qiov, tail_buf,
1446 align - ((offset + bytes) & (align - 1)));
1447
1448 bytes = ROUND_UP(bytes, align);
1449 }
1450
ebde595c 1451 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
85c97ca7 1452 ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
1453 use_local_qiov ? &local_qiov : qiov,
1454 flags);
1455 tracked_request_end(&req);
99723548 1456 bdrv_dec_in_flight(bs);
1457
1458 if (use_local_qiov) {
1459 qemu_iovec_destroy(&local_qiov);
1460 qemu_vfree(head_buf);
1461 qemu_vfree(tail_buf);
1462 }
1463
1464 return ret;
1465}
1466
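/* Write zeroes to [offset, offset + bytes): use the driver's efficient
 * .bdrv_co_pwrite_zeroes where possible and fall back to writing a zeroed
 * bounce buffer otherwise. */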
d05aa8bb 1467static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
f5a5ca79 1468 int64_t offset, int bytes, BdrvRequestFlags flags)
1469{
1470 BlockDriver *drv = bs->drv;
1471 QEMUIOVector qiov;
1472 struct iovec iov = {0};
1473 int ret = 0;
465fe887 1474 bool need_flush = false;
1475 int head = 0;
1476 int tail = 0;
61007b31 1477
cf081fca 1478 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1479 int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1480 bs->bl.request_alignment);
cb2e2878 1481 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
d05aa8bb 1482
1483 if (!drv) {
1484 return -ENOMEDIUM;
1485 }
1486
1487 assert(alignment % bs->bl.request_alignment == 0);
1488 head = offset % alignment;
f5a5ca79 1489 tail = (offset + bytes) % alignment;
1490 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1491 assert(max_write_zeroes >= bs->bl.request_alignment);
61007b31 1492
1493 while (bytes > 0 && !ret) {
1494 int num = bytes;
1495
1496 /* Align request. Block drivers can expect the "bulk" of the request
1497 * to be aligned, and that unaligned requests do not cross cluster
1498 * boundaries.
61007b31 1499 */
443668ca 1500 if (head) {
1501 /* Make a small request up to the first aligned sector. For
1502 * convenience, limit this request to max_transfer even if
1503 * we don't need to fall back to writes. */
f5a5ca79 1504 num = MIN(MIN(bytes, max_transfer), alignment - head);
1505 head = (head + num) % alignment;
1506 assert(num < max_write_zeroes);
d05aa8bb 1507 } else if (tail && num > alignment) {
1508 /* Shorten the request to the last aligned sector. */
1509 num -= tail;
1510 }
1511
1512 /* limit request size */
1513 if (num > max_write_zeroes) {
1514 num = max_write_zeroes;
1515 }
1516
1517 ret = -ENOTSUP;
1518 /* First try the efficient write zeroes operation */
1519 if (drv->bdrv_co_pwrite_zeroes) {
1520 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1521 flags & bs->supported_zero_flags);
1522 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1523 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1524 need_flush = true;
1525 }
1526 } else {
1527 assert(!bs->supported_zero_flags);
1528 }
1529
1530 if (ret == -ENOTSUP) {
1531 /* Fall back to bounce buffer if write zeroes is unsupported */
1532 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1533
1534 if ((flags & BDRV_REQ_FUA) &&
1535 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1536 /* No need for bdrv_driver_pwrite() to do a fallback
1537 * flush on each chunk; use just one at the end */
1538 write_flags &= ~BDRV_REQ_FUA;
1539 need_flush = true;
1540 }
5def6b80 1541 num = MIN(num, max_transfer);
d05aa8bb 1542 iov.iov_len = num;
61007b31 1543 if (iov.iov_base == NULL) {
d05aa8bb 1544 iov.iov_base = qemu_try_blockalign(bs, num);
1545 if (iov.iov_base == NULL) {
1546 ret = -ENOMEM;
1547 goto fail;
1548 }
d05aa8bb 1549 memset(iov.iov_base, 0, num);
1550 }
1551 qemu_iovec_init_external(&qiov, &iov, 1);
1552
d05aa8bb 1553 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
1554
1555 /* Keep bounce buffer around if it is big enough for all
 1556 * future requests.
1557 */
5def6b80 1558 if (num < max_transfer) {
1559 qemu_vfree(iov.iov_base);
1560 iov.iov_base = NULL;
1561 }
1562 }
1563
d05aa8bb 1564 offset += num;
f5a5ca79 1565 bytes -= num;
1566 }
1567
1568fail:
1569 if (ret == 0 && need_flush) {
1570 ret = bdrv_co_flush(bs);
1571 }
1572 qemu_vfree(iov.iov_base);
1573 return ret;
1574}
1575
1576/*
1577 * Forwards an already correctly aligned write request to the BlockDriver,
1578 * after possibly fragmenting it.
61007b31 1579 */
85c97ca7 1580static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
61007b31 1581 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
cff86b38 1582 int64_t align, QEMUIOVector *qiov, int flags)
61007b31 1583{
85c97ca7 1584 BlockDriverState *bs = child->bs;
1585 BlockDriver *drv = bs->drv;
1586 bool waited;
1587 int ret;
1588
9896c876 1589 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1590 uint64_t bytes_remaining = bytes;
1591 int max_transfer;
61007b31 1592
1593 if (!drv) {
1594 return -ENOMEDIUM;
1595 }
1596
1597 if (bdrv_has_readonly_bitmaps(bs)) {
1598 return -EPERM;
1599 }
1600
1601 assert(is_power_of_2(align));
1602 assert((offset & (align - 1)) == 0);
1603 assert((bytes & (align - 1)) == 0);
61007b31 1604 assert(!qiov || bytes == qiov->size);
abb06c5a 1605 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
fa166538 1606 assert(!(flags & ~BDRV_REQ_MASK));
1607 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1608 align);
61007b31 1609
1610 /* BDRV_REQ_NO_SERIALISING is only for read operation */
1611 assert(!(flags & BDRV_REQ_NO_SERIALISING));
1612
1613 if (flags & BDRV_REQ_SERIALISING) {
1614 mark_request_serialising(req, bdrv_get_cluster_size(bs));
1615 }
1616
61007b31 1617 waited = wait_serialising_requests(req);
1618 assert(!waited || !req->serialising ||
1619 is_request_serialising_and_aligned(req));
1620 assert(req->overlap_offset <= offset);
1621 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1622 if (flags & BDRV_REQ_WRITE_UNCHANGED) {
1623 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1624 } else {
1625 assert(child->perm & BLK_PERM_WRITE);
1626 }
362b3786 1627 assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
1628
1629 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1630
1631 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
c1499a5e 1632 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
1633 qemu_iovec_is_zero(qiov)) {
1634 flags |= BDRV_REQ_ZERO_WRITE;
1635 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1636 flags |= BDRV_REQ_MAY_UNMAP;
1637 }
1638 }
1639
1640 if (ret < 0) {
1641 /* Do nothing, write notifier decided to fail this request */
1642 } else if (flags & BDRV_REQ_ZERO_WRITE) {
9a4f4c31 1643 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
9896c876 1644 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
1645 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
1646 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
04ed95f4 1647 } else if (bytes <= max_transfer) {
9a4f4c31 1648 bdrv_debug_event(bs, BLKDBG_PWRITEV);
78a07294 1649 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
1650 } else {
1651 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1652 while (bytes_remaining) {
1653 int num = MIN(bytes_remaining, max_transfer);
1654 QEMUIOVector local_qiov;
1655 int local_flags = flags;
1656
1657 assert(num);
1658 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
1659 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1660 /* If FUA is going to be emulated by flush, we only
1661 * need to flush on the last iteration */
1662 local_flags &= ~BDRV_REQ_FUA;
1663 }
1664 qemu_iovec_init(&local_qiov, qiov->niov);
1665 qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1666
1667 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1668 num, &local_qiov, local_flags);
1669 qemu_iovec_destroy(&local_qiov);
1670 if (ret < 0) {
1671 break;
1672 }
1673 bytes_remaining -= num;
1674 }
61007b31 1675 }
9a4f4c31 1676 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
61007b31 1677
47fec599 1678 atomic_inc(&bs->write_gen);
0fdf1a4f 1679 bdrv_set_dirty(bs, offset, bytes);
61007b31 1680
f7946da2 1681 stat64_max(&bs->wr_highest_offset, offset + bytes);
1682
1683 if (ret >= 0) {
9896c876 1684 bs->total_sectors = MAX(bs->total_sectors, end_sector);
04ed95f4 1685 ret = 0;
1686 }
1687
1688 return ret;
1689}
1690
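/* Zero-write path for requests that may be unaligned: the unaligned head
 * and tail are handled with a read-modify-write cycle, the aligned middle
 * is written as zeroes directly. */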
85c97ca7 1691static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
1692 int64_t offset,
1693 unsigned int bytes,
1694 BdrvRequestFlags flags,
1695 BdrvTrackedRequest *req)
1696{
85c97ca7 1697 BlockDriverState *bs = child->bs;
1698 uint8_t *buf = NULL;
1699 QEMUIOVector local_qiov;
1700 struct iovec iov;
a5b8dd2c 1701 uint64_t align = bs->bl.request_alignment;
1702 unsigned int head_padding_bytes, tail_padding_bytes;
1703 int ret = 0;
1704
1705 head_padding_bytes = offset & (align - 1);
f13ce1be 1706 tail_padding_bytes = (align - (offset + bytes)) & (align - 1);
1707
1708
1709 assert(flags & BDRV_REQ_ZERO_WRITE);
1710 if (head_padding_bytes || tail_padding_bytes) {
1711 buf = qemu_blockalign(bs, align);
1712 iov = (struct iovec) {
1713 .iov_base = buf,
1714 .iov_len = align,
1715 };
1716 qemu_iovec_init_external(&local_qiov, &iov, 1);
1717 }
1718 if (head_padding_bytes) {
1719 uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1720
1721 /* RMW the unaligned part before head. */
1722 mark_request_serialising(req, align);
1723 wait_serialising_requests(req);
9a4f4c31 1724 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
85c97ca7 1725 ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
1726 align, &local_qiov, 0);
1727 if (ret < 0) {
1728 goto fail;
1729 }
9a4f4c31 1730 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1731
1732 memset(buf + head_padding_bytes, 0, zero_bytes);
85c97ca7 1733 ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
cff86b38 1734 align, &local_qiov,
9eeb6dd1
FZ
1735 flags & ~BDRV_REQ_ZERO_WRITE);
1736 if (ret < 0) {
1737 goto fail;
1738 }
1739 offset += zero_bytes;
1740 bytes -= zero_bytes;
1741 }
1742
1743 assert(!bytes || (offset & (align - 1)) == 0);
1744 if (bytes >= align) {
1745 /* Write the aligned part in the middle. */
1746 uint64_t aligned_bytes = bytes & ~(align - 1);
85c97ca7 1747 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
9eeb6dd1
FZ
1748 NULL, flags);
1749 if (ret < 0) {
1750 goto fail;
1751 }
1752 bytes -= aligned_bytes;
1753 offset += aligned_bytes;
1754 }
1755
1756 assert(!bytes || (offset & (align - 1)) == 0);
1757 if (bytes) {
1758 assert(align == tail_padding_bytes + bytes);
1759 /* RMW the unaligned part after tail. */
1760 mark_request_serialising(req, align);
1761 wait_serialising_requests(req);
9a4f4c31 1762 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
85c97ca7 1763 ret = bdrv_aligned_preadv(child, req, offset, align,
9eeb6dd1
FZ
1764 align, &local_qiov, 0);
1765 if (ret < 0) {
1766 goto fail;
1767 }
9a4f4c31 1768 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
9eeb6dd1
FZ
1769
1770 memset(buf, 0, bytes);
85c97ca7 1771 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
9eeb6dd1
FZ
1772 &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1773 }
1774fail:
1775 qemu_vfree(buf);
1776 return ret;
1777
1778}
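/*
 * Illustrative sketch, not part of upstream io.c: the padding arithmetic of
 * bdrv_co_do_zero_pwritev() worked through for one hypothetical request
 * (request_alignment 4096, offset 1536, bytes 5120).  The head RMW pass
 * covers the first block, nothing is left for the aligned middle, and the
 * tail RMW pass covers the second block.
 */
static inline void example_zero_pwritev_padding(void)
{
    uint64_t align = 4096;      /* hypothetical bs->bl.request_alignment */
    int64_t offset = 1536;
    unsigned int bytes = 5120;

    unsigned int head_padding_bytes = offset & (align - 1);
    unsigned int tail_padding_bytes = (align - (offset + bytes)) & (align - 1);

    assert(head_padding_bytes == 1536);
    assert(tail_padding_bytes == 1536);

    /* The head pass zeroes MIN(bytes, align - head_padding_bytes) = 2560
     * bytes, leaving offset = 4096 and bytes = 2560 < align, so the rest is
     * handled entirely by the tail RMW pass (1536 + 2560 == align). */
}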
1779
61007b31
SH
1780/*
1781 * Handle a write request in coroutine context
1782 */
a03ef88f 1783int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
61007b31
SH
1784 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1785 BdrvRequestFlags flags)
1786{
a03ef88f 1787 BlockDriverState *bs = child->bs;
61007b31 1788 BdrvTrackedRequest req;
a5b8dd2c 1789 uint64_t align = bs->bl.request_alignment;
61007b31
SH
1790 uint8_t *head_buf = NULL;
1791 uint8_t *tail_buf = NULL;
1792 QEMUIOVector local_qiov;
1793 bool use_local_qiov = false;
1794 int ret;
1795
f42cf447
DB
1796 trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
1797
61007b31
SH
1798 if (!bs->drv) {
1799 return -ENOMEDIUM;
1800 }
1801 if (bs->read_only) {
eaf5fe2d 1802 return -EPERM;
61007b31 1803 }
04c01a5c 1804 assert(!(bs->open_flags & BDRV_O_INACTIVE));
61007b31
SH
1805
1806 ret = bdrv_check_byte_request(bs, offset, bytes);
1807 if (ret < 0) {
1808 return ret;
1809 }
1810
99723548 1811 bdrv_inc_in_flight(bs);
61007b31
SH
1812 /*
1813 * Align write if necessary by performing a read-modify-write cycle.
1814 * Pad qiov with the read parts and be sure to have a tracked request not
1815 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1816 */
ebde595c 1817 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
61007b31 1818
18a59f03 1819 if (flags & BDRV_REQ_ZERO_WRITE) {
85c97ca7 1820 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
9eeb6dd1
FZ
1821 goto out;
1822 }
1823
61007b31
SH
1824 if (offset & (align - 1)) {
1825 QEMUIOVector head_qiov;
1826 struct iovec head_iov;
1827
1828 mark_request_serialising(&req, align);
1829 wait_serialising_requests(&req);
1830
1831 head_buf = qemu_blockalign(bs, align);
1832 head_iov = (struct iovec) {
1833 .iov_base = head_buf,
1834 .iov_len = align,
1835 };
1836 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1837
9a4f4c31 1838 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
85c97ca7 1839 ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
61007b31
SH
1840 align, &head_qiov, 0);
1841 if (ret < 0) {
1842 goto fail;
1843 }
9a4f4c31 1844 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
61007b31
SH
1845
1846 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1847 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1848 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1849 use_local_qiov = true;
1850
1851 bytes += offset & (align - 1);
1852 offset = offset & ~(align - 1);
117bc3fa
PL
1853
1854 /* We have read the tail already if the request is smaller
1855 * than one aligned block.
1856 */
1857 if (bytes < align) {
1858 qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
1859 bytes = align;
1860 }
61007b31
SH
1861 }
1862
1863 if ((offset + bytes) & (align - 1)) {
1864 QEMUIOVector tail_qiov;
1865 struct iovec tail_iov;
1866 size_t tail_bytes;
1867 bool waited;
1868
1869 mark_request_serialising(&req, align);
1870 waited = wait_serialising_requests(&req);
1871 assert(!waited || !use_local_qiov);
1872
1873 tail_buf = qemu_blockalign(bs, align);
1874 tail_iov = (struct iovec) {
1875 .iov_base = tail_buf,
1876 .iov_len = align,
1877 };
1878 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1879
9a4f4c31 1880 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
85c97ca7
KW
1881 ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
1882 align, align, &tail_qiov, 0);
61007b31
SH
1883 if (ret < 0) {
1884 goto fail;
1885 }
9a4f4c31 1886 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
61007b31
SH
1887
1888 if (!use_local_qiov) {
1889 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1890 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1891 use_local_qiov = true;
1892 }
1893
1894 tail_bytes = (offset + bytes) & (align - 1);
1895 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1896
1897 bytes = ROUND_UP(bytes, align);
1898 }
1899
85c97ca7 1900 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
3ea1a091
PB
1901 use_local_qiov ? &local_qiov : qiov,
1902 flags);
61007b31
SH
1903
1904fail:
61007b31
SH
1905
1906 if (use_local_qiov) {
1907 qemu_iovec_destroy(&local_qiov);
1908 }
1909 qemu_vfree(head_buf);
1910 qemu_vfree(tail_buf);
9eeb6dd1
FZ
1911out:
1912 tracked_request_end(&req);
99723548 1913 bdrv_dec_in_flight(bs);
61007b31
SH
1914 return ret;
1915}
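/*
 * Illustrative sketch, not part of upstream io.c: a hypothetical coroutine
 * caller submitting an unaligned buffer through bdrv_co_pwritev().  The RMW
 * alignment handling above is transparent to the caller; the helper name and
 * parameters are made up for the example.
 */
static int coroutine_fn example_co_write_blob(BdrvChild *child, int64_t offset,
                                              const void *blob,
                                              unsigned int size)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = (void *) blob,
        .iov_len = size,
    };

    qemu_iovec_init_external(&qiov, &iov, 1);

    /* An offset or size that is not a multiple of bs->bl.request_alignment
     * is fine: the head/tail read-modify-write above pads it. */
    return bdrv_co_pwritev(child, offset, size, &qiov, 0);
}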
1916
a03ef88f 1917int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
f5a5ca79 1918 int bytes, BdrvRequestFlags flags)
61007b31 1919{
f5a5ca79 1920 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
61007b31 1921
a03ef88f 1922 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
61007b31
SH
1923 flags &= ~BDRV_REQ_MAY_UNMAP;
1924 }
61007b31 1925
f5a5ca79 1926 return bdrv_co_pwritev(child, offset, bytes, NULL,
74021bc4 1927 BDRV_REQ_ZERO_WRITE | flags);
61007b31
SH
1928}
1929
4085f5c7
JS
1930/*
1931 * Flush ALL BDSes regardless of if they are reachable via a BlkBackend or not.
1932 */
1933int bdrv_flush_all(void)
1934{
1935 BdrvNextIterator it;
1936 BlockDriverState *bs = NULL;
1937 int result = 0;
1938
1939 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
1940 AioContext *aio_context = bdrv_get_aio_context(bs);
1941 int ret;
1942
1943 aio_context_acquire(aio_context);
1944 ret = bdrv_flush(bs);
1945 if (ret < 0 && !result) {
1946 result = ret;
1947 }
1948 aio_context_release(aio_context);
1949 }
1950
1951 return result;
1952}
1953
1954
4bcd936e 1955typedef struct BdrvCoBlockStatusData {
61007b31
SH
1956 BlockDriverState *bs;
1957 BlockDriverState *base;
c9ce8c4d 1958 bool want_zero;
4bcd936e
EB
1959 int64_t offset;
1960 int64_t bytes;
1961 int64_t *pnum;
1962 int64_t *map;
c9ce8c4d 1963 BlockDriverState **file;
4bcd936e 1964 int ret;
61007b31 1965 bool done;
4bcd936e 1966} BdrvCoBlockStatusData;
61007b31 1967
3e4d0e72
EB
1968int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
1969 bool want_zero,
1970 int64_t offset,
1971 int64_t bytes,
1972 int64_t *pnum,
1973 int64_t *map,
1974 BlockDriverState **file)
f7cc69b3
MP
1975{
1976 assert(bs->file && bs->file->bs);
3e4d0e72
EB
1977 *pnum = bytes;
1978 *map = offset;
f7cc69b3 1979 *file = bs->file->bs;
3e4d0e72 1980 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
f7cc69b3
MP
1981}
1982
3e4d0e72
EB
1983int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
1984 bool want_zero,
1985 int64_t offset,
1986 int64_t bytes,
1987 int64_t *pnum,
1988 int64_t *map,
1989 BlockDriverState **file)
f7cc69b3
MP
1990{
1991 assert(bs->backing && bs->backing->bs);
3e4d0e72
EB
1992 *pnum = bytes;
1993 *map = offset;
f7cc69b3 1994 *file = bs->backing->bs;
3e4d0e72 1995 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
f7cc69b3
MP
1996}
1997
61007b31
SH
1998/*
1999 * Returns the allocation status of the specified offset/bytes region.
2000 * Drivers not implementing the functionality are assumed to not support
2001 * backing files, hence the entire region is reported as allocated.
2002 *
86a3d5c6
EB
2003 * If 'want_zero' is true, the caller is querying for mapping
2004 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2005 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2006 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
c9ce8c4d 2007 *
2e8bc787 2008 * If 'offset' is beyond the end of the disk image the return value is
fb0d8654 2009 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
61007b31 2010 *
2e8bc787 2011 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
fb0d8654
EB
2012 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2013 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
67a0fd2a 2014 *
2e8bc787
EB
2015 * 'pnum' is set to the number of bytes (including and immediately
2016 * following the specified offset) that are easily known to be in the
2017 * same allocated/unallocated state. Note that a second call starting
2018 * at the original offset plus returned pnum may have the same status.
2019 * The returned value is non-zero on success except at end-of-file.
2020 *
2021 * Returns negative errno on failure. Otherwise, if the
2022 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2023 * set to the host mapping and BDS corresponding to the guest offset.
61007b31 2024 */
2e8bc787
EB
2025static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2026 bool want_zero,
2027 int64_t offset, int64_t bytes,
2028 int64_t *pnum, int64_t *map,
2029 BlockDriverState **file)
2030{
2031 int64_t total_size;
2032 int64_t n; /* bytes */
efa6e2ed 2033 int ret;
2e8bc787 2034 int64_t local_map = 0;
298a1665 2035 BlockDriverState *local_file = NULL;
efa6e2ed
EB
2036 int64_t aligned_offset, aligned_bytes;
2037 uint32_t align;
61007b31 2038
298a1665
EB
2039 assert(pnum);
2040 *pnum = 0;
2e8bc787
EB
2041 total_size = bdrv_getlength(bs);
2042 if (total_size < 0) {
2043 ret = total_size;
298a1665 2044 goto early_out;
61007b31
SH
2045 }
2046
2e8bc787 2047 if (offset >= total_size) {
298a1665
EB
2048 ret = BDRV_BLOCK_EOF;
2049 goto early_out;
61007b31 2050 }
2e8bc787 2051 if (!bytes) {
298a1665
EB
2052 ret = 0;
2053 goto early_out;
9cdcfd9f 2054 }
61007b31 2055
2e8bc787
EB
2056 n = total_size - offset;
2057 if (n < bytes) {
2058 bytes = n;
61007b31
SH
2059 }
2060
d470ad42
HR
2061 /* Must be non-NULL or bdrv_getlength() would have failed */
2062 assert(bs->drv);
636cb512 2063 if (!bs->drv->bdrv_co_block_status) {
2e8bc787 2064 *pnum = bytes;
61007b31 2065 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2e8bc787 2066 if (offset + bytes == total_size) {
fb0d8654
EB
2067 ret |= BDRV_BLOCK_EOF;
2068 }
61007b31 2069 if (bs->drv->protocol_name) {
2e8bc787
EB
2070 ret |= BDRV_BLOCK_OFFSET_VALID;
2071 local_map = offset;
298a1665 2072 local_file = bs;
61007b31 2073 }
298a1665 2074 goto early_out;
61007b31
SH
2075 }
2076
99723548 2077 bdrv_inc_in_flight(bs);
efa6e2ed
EB
2078
2079 /* Round out to request_alignment boundaries */
86a3d5c6 2080 align = bs->bl.request_alignment;
efa6e2ed
EB
2081 aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2082 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2083
636cb512
EB
2084 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2085 aligned_bytes, pnum, &local_map,
2086 &local_file);
2087 if (ret < 0) {
2088 *pnum = 0;
2089 goto out;
efa6e2ed
EB
2090 }
2091
2e8bc787 2092 /*
636cb512 2093 * The driver's result must be a non-zero multiple of request_alignment.
efa6e2ed 2094 * Clamp pnum and adjust map to original request.
2e8bc787 2095 */
636cb512
EB
2096 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2097 align > offset - aligned_offset);
efa6e2ed
EB
2098 *pnum -= offset - aligned_offset;
2099 if (*pnum > bytes) {
2100 *pnum = bytes;
61007b31 2101 }
2e8bc787 2102 if (ret & BDRV_BLOCK_OFFSET_VALID) {
efa6e2ed 2103 local_map += offset - aligned_offset;
2e8bc787 2104 }
61007b31
SH
2105
2106 if (ret & BDRV_BLOCK_RAW) {
298a1665 2107 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2e8bc787
EB
2108 ret = bdrv_co_block_status(local_file, want_zero, local_map,
2109 *pnum, pnum, &local_map, &local_file);
99723548 2110 goto out;
61007b31
SH
2111 }
2112
2113 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2114 ret |= BDRV_BLOCK_ALLOCATED;
c9ce8c4d 2115 } else if (want_zero) {
61007b31
SH
2116 if (bdrv_unallocated_blocks_are_zero(bs)) {
2117 ret |= BDRV_BLOCK_ZERO;
760e0063
KW
2118 } else if (bs->backing) {
2119 BlockDriverState *bs2 = bs->backing->bs;
2e8bc787 2120 int64_t size2 = bdrv_getlength(bs2);
c9ce8c4d 2121
2e8bc787 2122 if (size2 >= 0 && offset >= size2) {
61007b31
SH
2123 ret |= BDRV_BLOCK_ZERO;
2124 }
2125 }
2126 }
2127
c9ce8c4d 2128 if (want_zero && local_file && local_file != bs &&
61007b31
SH
2129 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2130 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2e8bc787
EB
2131 int64_t file_pnum;
2132 int ret2;
61007b31 2133
2e8bc787
EB
2134 ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2135 *pnum, &file_pnum, NULL, NULL);
61007b31
SH
2136 if (ret2 >= 0) {
2137 /* Ignore errors. This is just providing extra information; it
2138 * is useful but not necessary.
2139 */
c61e684e
EB
2140 if (ret2 & BDRV_BLOCK_EOF &&
2141 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2142 /*
2143 * It is valid for the format block driver to read
2144 * beyond the end of the underlying file's current
2145 * size; such areas read as zero.
2146 */
61007b31
SH
2147 ret |= BDRV_BLOCK_ZERO;
2148 } else {
2149 /* Limit request to the range reported by the protocol driver */
2150 *pnum = file_pnum;
2151 ret |= (ret2 & BDRV_BLOCK_ZERO);
2152 }
2153 }
2154 }
2155
99723548
PB
2156out:
2157 bdrv_dec_in_flight(bs);
2e8bc787 2158 if (ret >= 0 && offset + *pnum == total_size) {
fb0d8654
EB
2159 ret |= BDRV_BLOCK_EOF;
2160 }
298a1665
EB
2161early_out:
2162 if (file) {
2163 *file = local_file;
2164 }
2e8bc787
EB
2165 if (map) {
2166 *map = local_map;
2167 }
61007b31
SH
2168 return ret;
2169}
2170
5b648c67
EB
2171static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2172 BlockDriverState *base,
2173 bool want_zero,
2174 int64_t offset,
2175 int64_t bytes,
2176 int64_t *pnum,
2177 int64_t *map,
2178 BlockDriverState **file)
ba3f0e25
FZ
2179{
2180 BlockDriverState *p;
5b648c67 2181 int ret = 0;
c61e684e 2182 bool first = true;
ba3f0e25
FZ
2183
2184 assert(bs != base);
760e0063 2185 for (p = bs; p != base; p = backing_bs(p)) {
5b648c67
EB
2186 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2187 file);
c61e684e
EB
2188 if (ret < 0) {
2189 break;
2190 }
2191 if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
2192 /*
2193 * Reading beyond the end of the file continues to read
2194 * zeroes, but we can only widen the result to the
2195 * unallocated length we learned from an earlier
2196 * iteration.
2197 */
5b648c67 2198 *pnum = bytes;
c61e684e
EB
2199 }
2200 if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
ba3f0e25
FZ
2201 break;
2202 }
5b648c67
EB
2203 /* [offset, pnum] unallocated on this layer, which could be only
2204 * the first part of [offset, bytes]. */
2205 bytes = MIN(bytes, *pnum);
c61e684e 2206 first = false;
ba3f0e25
FZ
2207 }
2208 return ret;
2209}
2210
31826642 2211/* Coroutine wrapper for bdrv_block_status_above() */
5b648c67 2212static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
61007b31 2213{
4bcd936e 2214 BdrvCoBlockStatusData *data = opaque;
61007b31 2215
5b648c67
EB
2216 data->ret = bdrv_co_block_status_above(data->bs, data->base,
2217 data->want_zero,
2218 data->offset, data->bytes,
2219 data->pnum, data->map, data->file);
61007b31
SH
2220 data->done = true;
2221}
2222
2223/*
5b648c67 2224 * Synchronous wrapper around bdrv_co_block_status_above().
61007b31 2225 *
5b648c67 2226 * See bdrv_co_block_status_above() for details.
61007b31 2227 */
7ddb99b9
EB
2228static int bdrv_common_block_status_above(BlockDriverState *bs,
2229 BlockDriverState *base,
2230 bool want_zero, int64_t offset,
2231 int64_t bytes, int64_t *pnum,
2232 int64_t *map,
2233 BlockDriverState **file)
61007b31
SH
2234{
2235 Coroutine *co;
4bcd936e 2236 BdrvCoBlockStatusData data = {
61007b31 2237 .bs = bs,
ba3f0e25 2238 .base = base,
c9ce8c4d 2239 .want_zero = want_zero,
7ddb99b9
EB
2240 .offset = offset,
2241 .bytes = bytes,
2242 .pnum = pnum,
2243 .map = map,
c9ce8c4d 2244 .file = file,
61007b31
SH
2245 .done = false,
2246 };
2247
2248 if (qemu_in_coroutine()) {
2249 /* Fast-path if already in coroutine context */
5b648c67 2250 bdrv_block_status_above_co_entry(&data);
61007b31 2251 } else {
5b648c67 2252 co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
e92f0e19 2253 bdrv_coroutine_enter(bs, co);
88b062c2 2254 BDRV_POLL_WHILE(bs, !data.done);
61007b31 2255 }
7ddb99b9 2256 return data.ret;
61007b31
SH
2257}
2258
31826642
EB
2259int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2260 int64_t offset, int64_t bytes, int64_t *pnum,
2261 int64_t *map, BlockDriverState **file)
c9ce8c4d 2262{
31826642
EB
2263 return bdrv_common_block_status_above(bs, base, true, offset, bytes,
2264 pnum, map, file);
c9ce8c4d
EB
2265}
2266
237d78f8
EB
2267int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2268 int64_t *pnum, int64_t *map, BlockDriverState **file)
ba3f0e25 2269{
31826642
EB
2270 return bdrv_block_status_above(bs, backing_bs(bs),
2271 offset, bytes, pnum, map, file);
ba3f0e25
FZ
2272}
2273
d6a644bb
EB
2274int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2275 int64_t bytes, int64_t *pnum)
61007b31 2276{
7ddb99b9
EB
2277 int ret;
2278 int64_t dummy;
d6a644bb 2279
7ddb99b9
EB
2280 ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
2281 bytes, pnum ? pnum : &dummy, NULL,
c9ce8c4d 2282 NULL);
61007b31
SH
2283 if (ret < 0) {
2284 return ret;
2285 }
2286 return !!(ret & BDRV_BLOCK_ALLOCATED);
2287}
2288
2289/*
2290 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2291 *
51b0a488
EB
2292 * Return true if (a prefix of) the given range is allocated in any image
2293 * between BASE and TOP (inclusive). BASE can be NULL to check if the given
2294 * offset is allocated in any image of the chain. Return false otherwise,
d6a644bb 2295 * or negative errno on failure.
61007b31 2296 *
51b0a488
EB
2297 * 'pnum' is set to the number of bytes (including and immediately
2298 * following the specified offset) that are known to be in the same
2299 * allocated/unallocated state. Note that a subsequent call starting
2300 * at 'offset + *pnum' may return the same allocation status (in other
2301 * words, the result is not necessarily the maximum possible range);
2302 * but 'pnum' will only be 0 when end of file is reached.
61007b31
SH
2303 *
2304 */
2305int bdrv_is_allocated_above(BlockDriverState *top,
2306 BlockDriverState *base,
51b0a488 2307 int64_t offset, int64_t bytes, int64_t *pnum)
61007b31
SH
2308{
2309 BlockDriverState *intermediate;
51b0a488
EB
2310 int ret;
2311 int64_t n = bytes;
61007b31
SH
2312
2313 intermediate = top;
2314 while (intermediate && intermediate != base) {
d6a644bb 2315 int64_t pnum_inter;
c00716be 2316 int64_t size_inter;
d6a644bb 2317
51b0a488 2318 ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
61007b31
SH
2319 if (ret < 0) {
2320 return ret;
d6a644bb 2321 }
d6a644bb 2322 if (ret) {
51b0a488 2323 *pnum = pnum_inter;
61007b31
SH
2324 return 1;
2325 }
2326
51b0a488 2327 size_inter = bdrv_getlength(intermediate);
c00716be
EB
2328 if (size_inter < 0) {
2329 return size_inter;
2330 }
51b0a488
EB
2331 if (n > pnum_inter &&
2332 (intermediate == top || offset + pnum_inter < size_inter)) {
2333 n = pnum_inter;
61007b31
SH
2334 }
2335
760e0063 2336 intermediate = backing_bs(intermediate);
61007b31
SH
2337 }
2338
2339 *pnum = n;
2340 return 0;
2341}
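/*
 * Illustrative sketch, not part of upstream io.c: asking whether any image in
 * a backing chain allocates (a prefix of) a given range, the kind of check a
 * stream- or commit-style copy loop performs.  The helper name and range are
 * hypothetical.
 */
static int example_chain_has_data(BlockDriverState *top,
                                  int64_t offset, int64_t bytes)
{
    int64_t pnum;
    /* base == NULL: consider every image in the chain below 'top'. */
    int ret = bdrv_is_allocated_above(top, NULL, offset, bytes, &pnum);

    if (ret < 0) {
        return ret;
    }
    /* ret is 1 if a prefix of the range is allocated somewhere in the chain;
     * pnum tells how many bytes that answer covers. */
    return ret;
}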
2342
1a8ae822
KW
2343typedef struct BdrvVmstateCo {
2344 BlockDriverState *bs;
2345 QEMUIOVector *qiov;
2346 int64_t pos;
2347 bool is_read;
2348 int ret;
2349} BdrvVmstateCo;
2350
2351static int coroutine_fn
2352bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2353 bool is_read)
2354{
2355 BlockDriver *drv = bs->drv;
dc88a467
SH
2356 int ret = -ENOTSUP;
2357
2358 bdrv_inc_in_flight(bs);
1a8ae822
KW
2359
2360 if (!drv) {
dc88a467 2361 ret = -ENOMEDIUM;
1a8ae822 2362 } else if (drv->bdrv_load_vmstate) {
dc88a467
SH
2363 if (is_read) {
2364 ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2365 } else {
2366 ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2367 }
1a8ae822 2368 } else if (bs->file) {
dc88a467 2369 ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
1a8ae822
KW
2370 }
2371
dc88a467
SH
2372 bdrv_dec_in_flight(bs);
2373 return ret;
1a8ae822
KW
2374}
2375
2376static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
2377{
2378 BdrvVmstateCo *co = opaque;
2379 co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
2380}
2381
2382static inline int
2383bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2384 bool is_read)
2385{
2386 if (qemu_in_coroutine()) {
2387 return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
2388 } else {
2389 BdrvVmstateCo data = {
2390 .bs = bs,
2391 .qiov = qiov,
2392 .pos = pos,
2393 .is_read = is_read,
2394 .ret = -EINPROGRESS,
2395 };
0b8b8753 2396 Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
1a8ae822 2397
e92f0e19 2398 bdrv_coroutine_enter(bs, co);
ea17c9d2 2399 BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
1a8ae822
KW
2400 return data.ret;
2401 }
2402}
2403
61007b31
SH
2404int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2405 int64_t pos, int size)
2406{
2407 QEMUIOVector qiov;
2408 struct iovec iov = {
2409 .iov_base = (void *) buf,
2410 .iov_len = size,
2411 };
b433d942 2412 int ret;
61007b31
SH
2413
2414 qemu_iovec_init_external(&qiov, &iov, 1);
b433d942
KW
2415
2416 ret = bdrv_writev_vmstate(bs, &qiov, pos);
2417 if (ret < 0) {
2418 return ret;
2419 }
2420
2421 return size;
61007b31
SH
2422}
2423
2424int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2425{
1a8ae822 2426 return bdrv_rw_vmstate(bs, qiov, pos, false);
61007b31
SH
2427}
2428
2429int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2430 int64_t pos, int size)
5ddda0b8
KW
2431{
2432 QEMUIOVector qiov;
2433 struct iovec iov = {
2434 .iov_base = buf,
2435 .iov_len = size,
2436 };
b433d942 2437 int ret;
5ddda0b8
KW
2438
2439 qemu_iovec_init_external(&qiov, &iov, 1);
b433d942
KW
2440 ret = bdrv_readv_vmstate(bs, &qiov, pos);
2441 if (ret < 0) {
2442 return ret;
2443 }
2444
2445 return size;
5ddda0b8
KW
2446}
2447
2448int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
61007b31 2449{
1a8ae822 2450 return bdrv_rw_vmstate(bs, qiov, pos, true);
61007b31
SH
2451}
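/*
 * Illustrative sketch, not part of upstream io.c: storing and re-reading a
 * small opaque blob in the vmstate area of an image, much as migration does
 * for device state.  The buffer contents, size and position are hypothetical,
 * and the format driver must support vmstate (e.g. qcow2).
 */
static int example_roundtrip_vmstate(BlockDriverState *bs)
{
    uint8_t out[512] = { 0xab };
    uint8_t in[512];
    int ret;

    ret = bdrv_save_vmstate(bs, out, 0, sizeof(out));
    if (ret < 0) {
        return ret;
    }
    ret = bdrv_load_vmstate(bs, in, 0, sizeof(in));
    if (ret < 0) {
        return ret;
    }
    /* Both calls return the number of bytes transferred on success. */
    return memcmp(in, out, sizeof(out)) == 0 ? 0 : -EIO;
}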
2452
2453/**************************************************************/
2454/* async I/Os */
2455
61007b31
SH
2456void bdrv_aio_cancel(BlockAIOCB *acb)
2457{
2458 qemu_aio_ref(acb);
2459 bdrv_aio_cancel_async(acb);
2460 while (acb->refcnt > 1) {
2461 if (acb->aiocb_info->get_aio_context) {
2462 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2463 } else if (acb->bs) {
2f47da5f
PB
2464 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2465 * assert that we're not using an I/O thread. Thread-safe
2466 * code should use bdrv_aio_cancel_async exclusively.
2467 */
2468 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
61007b31
SH
2469 aio_poll(bdrv_get_aio_context(acb->bs), true);
2470 } else {
2471 abort();
2472 }
2473 }
2474 qemu_aio_unref(acb);
2475}
2476
2477/* Async version of aio cancel. The caller is not blocked if the acb implements
2478 * cancel_async; otherwise we do nothing and let the request complete normally.
2479 * In either case the completion callback must be called. */
2480void bdrv_aio_cancel_async(BlockAIOCB *acb)
2481{
2482 if (acb->aiocb_info->cancel_async) {
2483 acb->aiocb_info->cancel_async(acb);
2484 }
2485}
2486
61007b31
SH
2487/**************************************************************/
2488/* Coroutine block device emulation */
2489
e293b7a3
KW
2490typedef struct FlushCo {
2491 BlockDriverState *bs;
2492 int ret;
2493} FlushCo;
2494
2495
61007b31
SH
2496static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2497{
e293b7a3 2498 FlushCo *rwco = opaque;
61007b31
SH
2499
2500 rwco->ret = bdrv_co_flush(rwco->bs);
2501}
2502
2503int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2504{
49ca6259
FZ
2505 int current_gen;
2506 int ret = 0;
2507
2508 bdrv_inc_in_flight(bs);
61007b31 2509
e914404e 2510 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
1b6bc94d 2511 bdrv_is_sg(bs)) {
49ca6259 2512 goto early_exit;
61007b31
SH
2513 }
2514
3783fa3d 2515 qemu_co_mutex_lock(&bs->reqs_lock);
47fec599 2516 current_gen = atomic_read(&bs->write_gen);
3ff2f67a
EY
2517
2518 /* Wait until any previous flushes are completed */
99723548 2519 while (bs->active_flush_req) {
3783fa3d 2520 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
3ff2f67a
EY
2521 }
2522
3783fa3d 2523 /* Flushes reach this point in nondecreasing current_gen order. */
99723548 2524 bs->active_flush_req = true;
3783fa3d 2525 qemu_co_mutex_unlock(&bs->reqs_lock);
3ff2f67a 2526
c32b82af
PD
2527 /* Write back all layers by calling one driver function */
2528 if (bs->drv->bdrv_co_flush) {
2529 ret = bs->drv->bdrv_co_flush(bs);
2530 goto out;
2531 }
2532
61007b31
SH
2533 /* Write back cached data to the OS even with cache=unsafe */
2534 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2535 if (bs->drv->bdrv_co_flush_to_os) {
2536 ret = bs->drv->bdrv_co_flush_to_os(bs);
2537 if (ret < 0) {
cdb5e315 2538 goto out;
61007b31
SH
2539 }
2540 }
2541
2542 /* But don't actually force it to the disk with cache=unsafe */
2543 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2544 goto flush_parent;
2545 }
2546
3ff2f67a
EY
2547 /* Check if we really need to flush anything */
2548 if (bs->flushed_gen == current_gen) {
2549 goto flush_parent;
2550 }
2551
61007b31 2552 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
d470ad42
HR
2553 if (!bs->drv) {
2554 /* bs->drv->bdrv_co_flush() might have ejected the BDS
2555 * (even in case of apparent success) */
2556 ret = -ENOMEDIUM;
2557 goto out;
2558 }
61007b31
SH
2559 if (bs->drv->bdrv_co_flush_to_disk) {
2560 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2561 } else if (bs->drv->bdrv_aio_flush) {
2562 BlockAIOCB *acb;
2563 CoroutineIOCompletion co = {
2564 .coroutine = qemu_coroutine_self(),
2565 };
2566
2567 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2568 if (acb == NULL) {
2569 ret = -EIO;
2570 } else {
2571 qemu_coroutine_yield();
2572 ret = co.ret;
2573 }
2574 } else {
2575 /*
2576 * Some block drivers always operate in either writethrough or unsafe
2577 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2578 * know how the server works (because the behaviour is hardcoded or
2579 * depends on server-side configuration), so we can't ensure that
2580 * everything is safe on disk. Returning an error doesn't work because
2581 * that would break guests even if the server operates in writethrough
2582 * mode.
2583 *
2584 * Let's hope the user knows what he's doing.
2585 */
2586 ret = 0;
2587 }
3ff2f67a 2588
61007b31 2589 if (ret < 0) {
cdb5e315 2590 goto out;
61007b31
SH
2591 }
2592
2593 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2594 * in the case of cache=unsafe, so there are no useless flushes.
2595 */
2596flush_parent:
cdb5e315
FZ
2597 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2598out:
3ff2f67a 2599 /* Notify any pending flushes that we have completed */
e6af1e08
KW
2600 if (ret == 0) {
2601 bs->flushed_gen = current_gen;
2602 }
3783fa3d
PB
2603
2604 qemu_co_mutex_lock(&bs->reqs_lock);
99723548 2605 bs->active_flush_req = false;
156af3ac
DL
2606 /* Return value is ignored - it's ok if wait queue is empty */
2607 qemu_co_queue_next(&bs->flush_queue);
3783fa3d 2608 qemu_co_mutex_unlock(&bs->reqs_lock);
3ff2f67a 2609
49ca6259 2610early_exit:
99723548 2611 bdrv_dec_in_flight(bs);
cdb5e315 2612 return ret;
61007b31
SH
2613}
2614
2615int bdrv_flush(BlockDriverState *bs)
2616{
2617 Coroutine *co;
e293b7a3 2618 FlushCo flush_co = {
61007b31
SH
2619 .bs = bs,
2620 .ret = NOT_DONE,
2621 };
2622
2623 if (qemu_in_coroutine()) {
2624 /* Fast-path if already in coroutine context */
e293b7a3 2625 bdrv_flush_co_entry(&flush_co);
61007b31 2626 } else {
0b8b8753 2627 co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
e92f0e19 2628 bdrv_coroutine_enter(bs, co);
88b062c2 2629 BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
61007b31
SH
2630 }
2631
e293b7a3 2632 return flush_co.ret;
61007b31
SH
2633}
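/*
 * Illustrative sketch, not part of upstream io.c: the write generation
 * tracked above makes back-to-back flushes cheap.  With no writes in between,
 * the second bdrv_flush() finds bs->flushed_gen == current_gen and skips the
 * flush-to-disk step (for drivers without a single bdrv_co_flush callback).
 * The helper name is hypothetical.
 */
static int example_double_flush(BlockDriverState *bs)
{
    int ret = bdrv_flush(bs);   /* may actually reach the disk */

    if (ret < 0) {
        return ret;
    }
    return bdrv_flush(bs);      /* flush-to-disk is skipped this time */
}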
2634
2635typedef struct DiscardCo {
2636 BlockDriverState *bs;
0c51a893 2637 int64_t offset;
f5a5ca79 2638 int bytes;
61007b31
SH
2639 int ret;
2640} DiscardCo;
0c51a893 2641static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
61007b31
SH
2642{
2643 DiscardCo *rwco = opaque;
2644
f5a5ca79 2645 rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->bytes);
61007b31
SH
2646}
2647
9f1963b3 2648int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
f5a5ca79 2649 int bytes)
61007b31 2650{
b1066c87 2651 BdrvTrackedRequest req;
9f1963b3 2652 int max_pdiscard, ret;
3482b9bc 2653 int head, tail, align;
61007b31
SH
2654
2655 if (!bs->drv) {
2656 return -ENOMEDIUM;
2657 }
2658
d6883bc9
VSO
2659 if (bdrv_has_readonly_bitmaps(bs)) {
2660 return -EPERM;
2661 }
2662
f5a5ca79 2663 ret = bdrv_check_byte_request(bs, offset, bytes);
61007b31
SH
2664 if (ret < 0) {
2665 return ret;
2666 } else if (bs->read_only) {
eaf5fe2d 2667 return -EPERM;
61007b31 2668 }
04c01a5c 2669 assert(!(bs->open_flags & BDRV_O_INACTIVE));
61007b31 2670
61007b31
SH
2671 /* Do nothing if disabled. */
2672 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2673 return 0;
2674 }
2675
02aefe43 2676 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
61007b31
SH
2677 return 0;
2678 }
2679
3482b9bc
EB
2680 /* Discard is advisory, but some devices track and coalesce
2681 * unaligned requests, so we must pass everything down rather than
2682 * round here. Still, most devices will just silently ignore
2683 * unaligned requests (by returning -ENOTSUP), so we must fragment
2684 * the request accordingly. */
02aefe43 2685 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
b8d0a980
EB
2686 assert(align % bs->bl.request_alignment == 0);
2687 head = offset % align;
f5a5ca79 2688 tail = (offset + bytes) % align;
9f1963b3 2689
99723548 2690 bdrv_inc_in_flight(bs);
f5a5ca79 2691 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
50824995 2692
ec050f77
DL
2693 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
2694 if (ret < 0) {
2695 goto out;
2696 }
2697
9f1963b3
EB
2698 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
2699 align);
3482b9bc 2700 assert(max_pdiscard >= bs->bl.request_alignment);
61007b31 2701
f5a5ca79 2702 while (bytes > 0) {
f5a5ca79 2703 int num = bytes;
3482b9bc
EB
2704
2705 if (head) {
2706 /* Make small requests to get to alignment boundaries. */
f5a5ca79 2707 num = MIN(bytes, align - head);
3482b9bc
EB
2708 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
2709 num %= bs->bl.request_alignment;
2710 }
2711 head = (head + num) % align;
2712 assert(num < max_pdiscard);
2713 } else if (tail) {
2714 if (num > align) {
2715 /* Shorten the request to the last aligned cluster. */
2716 num -= tail;
2717 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
2718 tail > bs->bl.request_alignment) {
2719 tail %= bs->bl.request_alignment;
2720 num -= tail;
2721 }
2722 }
2723 /* limit request size */
2724 if (num > max_pdiscard) {
2725 num = max_pdiscard;
2726 }
61007b31 2727
d470ad42
HR
2728 if (!bs->drv) {
2729 ret = -ENOMEDIUM;
2730 goto out;
2731 }
47a5486d
EB
2732 if (bs->drv->bdrv_co_pdiscard) {
2733 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
61007b31
SH
2734 } else {
2735 BlockAIOCB *acb;
2736 CoroutineIOCompletion co = {
2737 .coroutine = qemu_coroutine_self(),
2738 };
2739
4da444a0
EB
2740 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
2741 bdrv_co_io_em_complete, &co);
61007b31 2742 if (acb == NULL) {
b1066c87
FZ
2743 ret = -EIO;
2744 goto out;
61007b31
SH
2745 } else {
2746 qemu_coroutine_yield();
2747 ret = co.ret;
2748 }
2749 }
2750 if (ret && ret != -ENOTSUP) {
b1066c87 2751 goto out;
61007b31
SH
2752 }
2753
9f1963b3 2754 offset += num;
f5a5ca79 2755 bytes -= num;
61007b31 2756 }
b1066c87
FZ
2757 ret = 0;
2758out:
47fec599 2759 atomic_inc(&bs->write_gen);
0fdf1a4f 2760 bdrv_set_dirty(bs, req.offset, req.bytes);
b1066c87 2761 tracked_request_end(&req);
99723548 2762 bdrv_dec_in_flight(bs);
b1066c87 2763 return ret;
61007b31
SH
2764}
2765
f5a5ca79 2766int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
61007b31
SH
2767{
2768 Coroutine *co;
2769 DiscardCo rwco = {
2770 .bs = bs,
0c51a893 2771 .offset = offset,
f5a5ca79 2772 .bytes = bytes,
61007b31
SH
2773 .ret = NOT_DONE,
2774 };
2775
2776 if (qemu_in_coroutine()) {
2777 /* Fast-path if already in coroutine context */
0c51a893 2778 bdrv_pdiscard_co_entry(&rwco);
61007b31 2779 } else {
0c51a893 2780 co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
e92f0e19 2781 bdrv_coroutine_enter(bs, co);
88b062c2 2782 BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
61007b31
SH
2783 }
2784
2785 return rwco.ret;
2786}
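/*
 * Illustrative sketch, not part of upstream io.c: discarding a byte range
 * that is not aligned to pdiscard_alignment.  bdrv_co_pdiscard() above
 * fragments the request itself, so the caller only supplies the byte range;
 * the offsets here are hypothetical.
 */
static int example_discard_range(BlockDriverState *bs)
{
    /* 1 MiB starting at an odd 4 KiB offset; head and tail fragments are
     * produced internally if the device prefers a larger alignment.  A
     * return of 0 also covers the do-nothing cases (BDRV_O_UNMAP clear or
     * no driver discard support). */
    return bdrv_pdiscard(bs, 4096, 1024 * 1024);
}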
2787
48af776a 2788int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
61007b31
SH
2789{
2790 BlockDriver *drv = bs->drv;
5c5ae76a
FZ
2791 CoroutineIOCompletion co = {
2792 .coroutine = qemu_coroutine_self(),
2793 };
2794 BlockAIOCB *acb;
61007b31 2795
99723548 2796 bdrv_inc_in_flight(bs);
16a389dc 2797 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
5c5ae76a
FZ
2798 co.ret = -ENOTSUP;
2799 goto out;
2800 }
2801
16a389dc
KW
2802 if (drv->bdrv_co_ioctl) {
2803 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
2804 } else {
2805 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2806 if (!acb) {
2807 co.ret = -ENOTSUP;
2808 goto out;
2809 }
2810 qemu_coroutine_yield();
5c5ae76a 2811 }
5c5ae76a 2812out:
99723548 2813 bdrv_dec_in_flight(bs);
5c5ae76a
FZ
2814 return co.ret;
2815}
2816
61007b31
SH
2817void *qemu_blockalign(BlockDriverState *bs, size_t size)
2818{
2819 return qemu_memalign(bdrv_opt_mem_align(bs), size);
2820}
2821
2822void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2823{
2824 return memset(qemu_blockalign(bs, size), 0, size);
2825}
2826
2827void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2828{
2829 size_t align = bdrv_opt_mem_align(bs);
2830
2831 /* Ensure that NULL is never returned on success */
2832 assert(align > 0);
2833 if (size == 0) {
2834 size = align;
2835 }
2836
2837 return qemu_try_memalign(align, size);
2838}
2839
2840void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2841{
2842 void *mem = qemu_try_blockalign(bs, size);
2843
2844 if (mem) {
2845 memset(mem, 0, size);
2846 }
2847
2848 return mem;
2849}
2850
2851/*
2852 * Check if all memory in this vector is sector aligned.
2853 */
2854bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2855{
2856 int i;
4196d2f0 2857 size_t alignment = bdrv_min_mem_align(bs);
61007b31
SH
2858
2859 for (i = 0; i < qiov->niov; i++) {
2860 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2861 return false;
2862 }
2863 if (qiov->iov[i].iov_len % alignment) {
2864 return false;
2865 }
2866 }
2867
2868 return true;
2869}
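/*
 * Illustrative sketch, not part of upstream io.c: building an I/O vector from
 * a bounce buffer allocated with qemu_blockalign() and checking it with
 * bdrv_qiov_is_aligned().  The buffer size and helper name are hypothetical.
 */
static bool example_aligned_qiov(BlockDriverState *bs)
{
    size_t len = 64 * 1024;
    void *buf = qemu_blockalign(bs, len);   /* aligned to bdrv_opt_mem_align() */
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = buf,
        .iov_len = len,
    };
    bool ok;

    qemu_iovec_init_external(&qiov, &iov, 1);
    /* Expected to be true here: base and length are both multiples of the
     * minimum memory alignment. */
    ok = bdrv_qiov_is_aligned(bs, &qiov);

    qemu_vfree(buf);
    return ok;
}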
2870
2871void bdrv_add_before_write_notifier(BlockDriverState *bs,
2872 NotifierWithReturn *notifier)
2873{
2874 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2875}
2876
2877void bdrv_io_plug(BlockDriverState *bs)
2878{
6b98bd64
PB
2879 BdrvChild *child;
2880
2881 QLIST_FOREACH(child, &bs->children, next) {
2882 bdrv_io_plug(child->bs);
2883 }
2884
850d54a2 2885 if (atomic_fetch_inc(&bs->io_plugged) == 0) {
6b98bd64
PB
2886 BlockDriver *drv = bs->drv;
2887 if (drv && drv->bdrv_io_plug) {
2888 drv->bdrv_io_plug(bs);
2889 }
61007b31
SH
2890 }
2891}
2892
2893void bdrv_io_unplug(BlockDriverState *bs)
2894{
6b98bd64
PB
2895 BdrvChild *child;
2896
2897 assert(bs->io_plugged);
850d54a2 2898 if (atomic_fetch_dec(&bs->io_plugged) == 1) {
6b98bd64
PB
2899 BlockDriver *drv = bs->drv;
2900 if (drv && drv->bdrv_io_unplug) {
2901 drv->bdrv_io_unplug(bs);
2902 }
2903 }
2904
2905 QLIST_FOREACH(child, &bs->children, next) {
2906 bdrv_io_unplug(child->bs);
61007b31
SH
2907 }
2908}
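/*
 * Illustrative sketch, not part of upstream io.c: batching request submission
 * between bdrv_io_plug() and bdrv_io_unplug(), as a virtio-blk-style caller
 * might.  The submit_one() callback and request count are hypothetical.
 */
static void example_plugged_batch(BlockDriverState *bs,
                                  void (*submit_one)(BlockDriverState *, int),
                                  int nreqs)
{
    int i;

    bdrv_io_plug(bs);           /* recursively plugs all children */
    for (i = 0; i < nreqs; i++) {
        submit_one(bs, i);      /* drivers may queue rather than submit */
    }
    bdrv_io_unplug(bs);         /* queued requests are kicked off here */
}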
23d0ba93
FZ
2909
2910void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
2911{
2912 BdrvChild *child;
2913
2914 if (bs->drv && bs->drv->bdrv_register_buf) {
2915 bs->drv->bdrv_register_buf(bs, host, size);
2916 }
2917 QLIST_FOREACH(child, &bs->children, next) {
2918 bdrv_register_buf(child->bs, host, size);
2919 }
2920}
2921
2922void bdrv_unregister_buf(BlockDriverState *bs, void *host)
2923{
2924 BdrvChild *child;
2925
2926 if (bs->drv && bs->drv->bdrv_unregister_buf) {
2927 bs->drv->bdrv_unregister_buf(bs, host);
2928 }
2929 QLIST_FOREACH(child, &bs->children, next) {
2930 bdrv_unregister_buf(child->bs, host);
2931 }
2932}
fcc67678 2933
67b51fb9
VSO
2934static int coroutine_fn bdrv_co_copy_range_internal(
2935 BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
2936 uint64_t dst_offset, uint64_t bytes,
2937 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
2938 bool recurse_src)
fcc67678 2939{
999658a0 2940 BdrvTrackedRequest req;
fcc67678
FZ
2941 int ret;
2942
d4d3e5a0 2943 if (!dst || !dst->bs) {
fcc67678
FZ
2944 return -ENOMEDIUM;
2945 }
fcc67678
FZ
2946 ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
2947 if (ret) {
2948 return ret;
2949 }
67b51fb9
VSO
2950 if (write_flags & BDRV_REQ_ZERO_WRITE) {
2951 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
fcc67678
FZ
2952 }
2953
d4d3e5a0
FZ
2954 if (!src || !src->bs) {
2955 return -ENOMEDIUM;
2956 }
2957 ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
2958 if (ret) {
2959 return ret;
2960 }
2961
fcc67678
FZ
2962 if (!src->bs->drv->bdrv_co_copy_range_from
2963 || !dst->bs->drv->bdrv_co_copy_range_to
2964 || src->bs->encrypted || dst->bs->encrypted) {
2965 return -ENOTSUP;
2966 }
37aec7d7 2967
fcc67678 2968 if (recurse_src) {
999658a0
VSO
2969 bdrv_inc_in_flight(src->bs);
2970 tracked_request_begin(&req, src->bs, src_offset, bytes,
2971 BDRV_TRACKED_READ);
2972
09d2f948
VSO
2973 /* BDRV_REQ_SERIALISING is only for write operation */
2974 assert(!(read_flags & BDRV_REQ_SERIALISING));
67b51fb9 2975 if (!(read_flags & BDRV_REQ_NO_SERIALISING)) {
999658a0
VSO
2976 wait_serialising_requests(&req);
2977 }
2978
37aec7d7
FZ
2979 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
2980 src, src_offset,
2981 dst, dst_offset,
67b51fb9
VSO
2982 bytes,
2983 read_flags, write_flags);
999658a0
VSO
2984
2985 tracked_request_end(&req);
2986 bdrv_dec_in_flight(src->bs);
fcc67678 2987 } else {
999658a0
VSO
2988 bdrv_inc_in_flight(dst->bs);
2989 tracked_request_begin(&req, dst->bs, dst_offset, bytes,
2990 BDRV_TRACKED_WRITE);
2991
67b51fb9
VSO
2992 /* BDRV_REQ_NO_SERIALISING is only for read operation */
2993 assert(!(write_flags & BDRV_REQ_NO_SERIALISING));
09d2f948
VSO
2994 if (write_flags & BDRV_REQ_SERIALISING) {
2995 mark_request_serialising(&req, bdrv_get_cluster_size(dst->bs));
2996 }
999658a0
VSO
2997 wait_serialising_requests(&req);
2998
37aec7d7
FZ
2999 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3000 src, src_offset,
3001 dst, dst_offset,
67b51fb9
VSO
3002 bytes,
3003 read_flags, write_flags);
999658a0
VSO
3004
3005 tracked_request_end(&req);
3006 bdrv_dec_in_flight(dst->bs);
fcc67678 3007 }
999658a0 3008
37aec7d7 3009 return ret;
fcc67678
FZ
3010}
3011
3012/* Copy range from @src to @dst.
3013 *
3014 * See the comment of bdrv_co_copy_range for the parameter and return value
3015 * semantics. */
3016int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
3017 BdrvChild *dst, uint64_t dst_offset,
67b51fb9
VSO
3018 uint64_t bytes,
3019 BdrvRequestFlags read_flags,
3020 BdrvRequestFlags write_flags)
fcc67678 3021{
ecc983a5
FZ
3022 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3023 read_flags, write_flags);
fcc67678 3024 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
67b51fb9 3025 bytes, read_flags, write_flags, true);
fcc67678
FZ
3026}
3027
3028/* Copy range from @src to @dst.
3029 *
3030 * See the comment of bdrv_co_copy_range for the parameter and return value
3031 * semantics. */
3032int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
3033 BdrvChild *dst, uint64_t dst_offset,
67b51fb9
VSO
3034 uint64_t bytes,
3035 BdrvRequestFlags read_flags,
3036 BdrvRequestFlags write_flags)
fcc67678 3037{
ecc983a5
FZ
3038 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3039 read_flags, write_flags);
fcc67678 3040 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
67b51fb9 3041 bytes, read_flags, write_flags, false);
fcc67678
FZ
3042}
3043
3044int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
3045 BdrvChild *dst, uint64_t dst_offset,
67b51fb9
VSO
3046 uint64_t bytes, BdrvRequestFlags read_flags,
3047 BdrvRequestFlags write_flags)
fcc67678 3048{
37aec7d7
FZ
3049 return bdrv_co_copy_range_from(src, src_offset,
3050 dst, dst_offset,
67b51fb9 3051 bytes, read_flags, write_flags);
fcc67678 3052}
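/*
 * Illustrative sketch, not part of upstream io.c: requesting an offloaded
 * copy between two children with bdrv_co_copy_range().  Offsets, length and
 * the helper name are hypothetical.
 */
static int coroutine_fn example_co_offloaded_copy(BdrvChild *src,
                                                  BdrvChild *dst,
                                                  uint64_t bytes)
{
    /* Copy 'bytes' from the start of src to the start of dst with default
     * flags.  -ENOTSUP means a driver lacks copy offloading and the caller
     * should fall back to an ordinary read/write bounce loop. */
    return bdrv_co_copy_range(src, 0, dst, 0, bytes, 0, 0);
}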
3d9f2d2a
KW
3053
3054static void bdrv_parent_cb_resize(BlockDriverState *bs)
3055{
3056 BdrvChild *c;
3057 QLIST_FOREACH(c, &bs->parents, next_parent) {
3058 if (c->role->resize) {
3059 c->role->resize(c);
3060 }
3061 }
3062}
3063
3064/**
3065 * Truncate file to 'offset' bytes (needed only for file protocols)
3066 */
3067int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
3068 PreallocMode prealloc, Error **errp)
3069{
3070 BlockDriverState *bs = child->bs;
3071 BlockDriver *drv = bs->drv;
1bc5f09f
KW
3072 BdrvTrackedRequest req;
3073 int64_t old_size, new_bytes;
3d9f2d2a
KW
3074 int ret;
3075
3076 assert(child->perm & BLK_PERM_RESIZE);
3077
3078 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3079 if (!drv) {
3080 error_setg(errp, "No medium inserted");
3081 return -ENOMEDIUM;
3082 }
3083 if (offset < 0) {
3084 error_setg(errp, "Image size cannot be negative");
3085 return -EINVAL;
3086 }
3087
1bc5f09f
KW
3088 old_size = bdrv_getlength(bs);
3089 if (old_size < 0) {
3090 error_setg_errno(errp, -old_size, "Failed to get old image size");
3091 return old_size;
3092 }
3093
3094 if (offset > old_size) {
3095 new_bytes = offset - old_size;
3096 } else {
3097 new_bytes = 0;
3098 }
3099
3d9f2d2a 3100 bdrv_inc_in_flight(bs);
1bc5f09f
KW
3101 tracked_request_begin(&req, bs, offset, new_bytes, BDRV_TRACKED_TRUNCATE);
3102
3103 /* If we are growing the image and potentially using preallocation for the
3104 * new area, we need to make sure that no write requests are made to it
3105 * concurrently or they might be overwritten by preallocation. */
3106 if (new_bytes) {
3107 mark_request_serialising(&req, 1);
3108 wait_serialising_requests(&req);
3109 }
3d9f2d2a
KW
3110
3111 if (!drv->bdrv_co_truncate) {
3112 if (bs->file && drv->is_filter) {
3113 ret = bdrv_co_truncate(bs->file, offset, prealloc, errp);
3114 goto out;
3115 }
3116 error_setg(errp, "Image format driver does not support resize");
3117 ret = -ENOTSUP;
3118 goto out;
3119 }
3120 if (bs->read_only) {
3121 error_setg(errp, "Image is read-only");
3122 ret = -EACCES;
3123 goto out;
3124 }
3125
3126 assert(!(bs->open_flags & BDRV_O_INACTIVE));
3127
3128 ret = drv->bdrv_co_truncate(bs, offset, prealloc, errp);
3129 if (ret < 0) {
3130 goto out;
3131 }
3132 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3133 if (ret < 0) {
3134 error_setg_errno(errp, -ret, "Could not refresh total sector count");
3135 } else {
3136 offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3137 }
3138 bdrv_dirty_bitmap_truncate(bs, offset);
3139 bdrv_parent_cb_resize(bs);
3140 atomic_inc(&bs->write_gen);
3141
3142out:
1bc5f09f 3143 tracked_request_end(&req);
3d9f2d2a 3144 bdrv_dec_in_flight(bs);
1bc5f09f 3145
3d9f2d2a
KW
3146 return ret;
3147}
3148
3149typedef struct TruncateCo {
3150 BdrvChild *child;
3151 int64_t offset;
3152 PreallocMode prealloc;
3153 Error **errp;
3154 int ret;
3155} TruncateCo;
3156
3157static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
3158{
3159 TruncateCo *tco = opaque;
3160 tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc,
3161 tco->errp);
3162}
3163
3164int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
3165 Error **errp)
3166{
3167 Coroutine *co;
3168 TruncateCo tco = {
3169 .child = child,
3170 .offset = offset,
3171 .prealloc = prealloc,
3172 .errp = errp,
3173 .ret = NOT_DONE,
3174 };
3175
3176 if (qemu_in_coroutine()) {
3177 /* Fast-path if already in coroutine context */
3178 bdrv_truncate_co_entry(&tco);
3179 } else {
3180 co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
3181 qemu_coroutine_enter(co);
3182 BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
3183 }
3184
3185 return tco.ret;
3186}
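/*
 * Illustrative sketch, not part of upstream io.c: growing an image with
 * bdrv_truncate() and reporting failure.  The helper name, the new size and
 * the choice of PREALLOC_MODE_OFF are hypothetical.
 */
static int example_grow_image(BdrvChild *child, int64_t new_size)
{
    Error *local_err = NULL;
    int ret = bdrv_truncate(child, new_size, PREALLOC_MODE_OFF, &local_err);

    if (ret < 0) {
        error_report_err(local_err);    /* prints and frees local_err */
    }
    return ret;
}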