2 * Block layer I/O functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 #include "qemu/osdep.h"
27 #include "sysemu/block-backend.h"
28 #include "block/aio-wait.h"
29 #include "block/blockjob.h"
30 #include "block/blockjob_int.h"
31 #include "block/block_int.h"
32 #include "qemu/cutils.h"
33 #include "qapi/error.h"
34 #include "qemu/error-report.h"
36 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
38 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
39 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
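/* Reading aid (arithmetic added by the editor, hedged): with the usual
 * 512-byte sector size (BDRV_SECTOR_BITS == 9) this cap works out to
 * 32768 * 512 bytes, i.e. a 16 MiB bounce buffer. */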
41 static void bdrv_parent_cb_resize(BlockDriverState *bs);
42 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
43 int64_t offset, int bytes, BdrvRequestFlags flags);
45 void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
46 bool ignore_bds_parents)
50 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
51 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
54 bdrv_parent_drained_begin_single(c, false);
58 void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
59 bool ignore_bds_parents)
63 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
64 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
67 if (c->role->drained_end) {
68 c->role->drained_end(c);
73 static bool bdrv_parent_drained_poll_single(BdrvChild *c)
75 if (c->role->drained_poll) {
76 return c->role->drained_poll(c);
81 static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
82 bool ignore_bds_parents)
87 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
88 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
91 busy |= bdrv_parent_drained_poll_single(c);
97 void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
99 if (c->role->drained_begin) {
100 c->role->drained_begin(c);
103 BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
107 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
109 dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
110 dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
111 dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
112 src->opt_mem_alignment);
113 dst->min_mem_alignment = MAX(dst->min_mem_alignment,
114 src->min_mem_alignment);
115 dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
118 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
120 BlockDriver *drv = bs->drv;
121 Error *local_err = NULL;
123 memset(&bs->bl, 0, sizeof(bs->bl));
129 /* Default alignment based on whether driver has byte interface */
130 bs->bl.request_alignment = (drv->bdrv_co_preadv ||
131 drv->bdrv_aio_preadv) ? 1 : 512;
133 /* Take some limits from the children as a default */
135 bdrv_refresh_limits(bs->file->bs, &local_err);
137 error_propagate(errp, local_err);
140 bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
142 bs->bl.min_mem_alignment = 512;
143 bs->bl.opt_mem_alignment = getpagesize();
145 /* Safe default since most protocols use readv()/writev()/etc */
146 bs->bl.max_iov = IOV_MAX;
150 bdrv_refresh_limits(bs->backing->bs, &local_err);
152 error_propagate(errp, local_err);
155 bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
158 /* Then let the driver override it */
159 if (drv->bdrv_refresh_limits) {
160 drv->bdrv_refresh_limits(bs, errp);
165 * The copy-on-read flag is actually a reference count, so multiple users may
166 * use the feature without worrying about clobbering its previous state.
167 * Copy-on-read stays enabled until all users have disabled it again.
169 void bdrv_enable_copy_on_read(BlockDriverState *bs)
171 atomic_inc(&bs->copy_on_read);
174 void bdrv_disable_copy_on_read(BlockDriverState *bs)
176 int old = atomic_fetch_dec(&bs->copy_on_read);
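/*
 * Illustrative usage sketch (not part of the original code): because the
 * flag is a reference count, independent users simply pair the calls and
 * copy-on-read stays active until the last user is done:
 *
 *     bdrv_enable_copy_on_read(bs);     // user A
 *     bdrv_enable_copy_on_read(bs);     // user B
 *     bdrv_disable_copy_on_read(bs);    // user B finished, still enabled
 *     bdrv_disable_copy_on_read(bs);    // user A finished, COR off again
 */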
182 BlockDriverState *bs;
188 bool ignore_bds_parents;
191 static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
193 BdrvCoDrainData *data = opaque;
194 BlockDriverState *bs = data->bs;
197 bs->drv->bdrv_co_drain_begin(bs);
199 bs->drv->bdrv_co_drain_end(bs);
202 /* Set data->done before reading bs->wakeup. */
203 atomic_mb_set(&data->done, true);
204 bdrv_dec_in_flight(bs);
211 /* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
212 static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
214 BdrvCoDrainData *data;
216 if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
217 (!begin && !bs->drv->bdrv_co_drain_end)) {
221 data = g_new(BdrvCoDrainData, 1);
222 *data = (BdrvCoDrainData) {
228 /* Make sure the driver callback completes during the polling phase for drain_begin. */
230 bdrv_inc_in_flight(bs);
231 data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
232 aio_co_schedule(bdrv_get_aio_context(bs), data->co);
235 BDRV_POLL_WHILE(bs, !data->done);
240 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
241 bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
242 BdrvChild *ignore_parent, bool ignore_bds_parents)
244 BdrvChild *child, *next;
246 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
250 if (atomic_read(&bs->in_flight)) {
255 assert(!ignore_bds_parents);
256 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
257 if (bdrv_drain_poll(child->bs, recursive, child, false)) {
266 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
267 BdrvChild *ignore_parent)
269 return bdrv_drain_poll(bs, recursive, ignore_parent, false);
272 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
273 BdrvChild *parent, bool ignore_bds_parents,
275 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
276 BdrvChild *parent, bool ignore_bds_parents);
278 static void bdrv_co_drain_bh_cb(void *opaque)
280 BdrvCoDrainData *data = opaque;
281 Coroutine *co = data->co;
282 BlockDriverState *bs = data->bs;
285 AioContext *ctx = bdrv_get_aio_context(bs);
286 AioContext *co_ctx = qemu_coroutine_get_aio_context(co);
289 * When the coroutine yielded, the lock for its home context was
290 * released, so we need to re-acquire it here. If it explicitly
291 * acquired a different context, the lock is still held and we don't
292 * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
295 aio_context_acquire(ctx);
297 bdrv_dec_in_flight(bs);
299 bdrv_do_drained_begin(bs, data->recursive, data->parent,
300 data->ignore_bds_parents, data->poll);
302 bdrv_do_drained_end(bs, data->recursive, data->parent,
303 data->ignore_bds_parents);
306 aio_context_release(ctx);
310 bdrv_drain_all_begin();
317 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
318 bool begin, bool recursive,
320 bool ignore_bds_parents,
323 BdrvCoDrainData data;
325 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
326 * other coroutines run if they were queued by aio_co_enter(). */
328 assert(qemu_in_coroutine());
329 data = (BdrvCoDrainData) {
330 .co = qemu_coroutine_self(),
334 .recursive = recursive,
336 .ignore_bds_parents = ignore_bds_parents,
340 bdrv_inc_in_flight(bs);
342 aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
343 bdrv_co_drain_bh_cb, &data);
345 qemu_coroutine_yield();
346 /* If we are resumed from some other event (such as an aio completion or a
347 * timer callback), it is a bug in the caller that should be fixed. */
351 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
352 BdrvChild *parent, bool ignore_bds_parents)
354 assert(!qemu_in_coroutine());
356 /* Stop things in parent-to-child order */
357 if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
358 aio_disable_external(bdrv_get_aio_context(bs));
361 bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
362 bdrv_drain_invoke(bs, true);
365 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
366 BdrvChild *parent, bool ignore_bds_parents,
369 BdrvChild *child, *next;
371 if (qemu_in_coroutine()) {
372 bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
377 bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);
380 assert(!ignore_bds_parents);
381 bs->recursive_quiesce_counter++;
382 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
383 bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
389 * Wait for drained requests to finish.
391 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
392 * call is needed so things in this AioContext can make progress even
393 * though we don't return to the main AioContext loop - this automatically
394 * includes other nodes in the same AioContext and therefore all child nodes.
398 assert(!ignore_bds_parents);
399 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
403 void bdrv_drained_begin(BlockDriverState *bs)
405 bdrv_do_drained_begin(bs, false, NULL, false, true);
408 void bdrv_subtree_drained_begin(BlockDriverState *bs)
410 bdrv_do_drained_begin(bs, true, NULL, false, true);
413 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
414 BdrvChild *parent, bool ignore_bds_parents)
416 BdrvChild *child, *next;
417 int old_quiesce_counter;
419 if (qemu_in_coroutine()) {
420 bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
424 assert(bs->quiesce_counter > 0);
425 old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
427 /* Re-enable things in child-to-parent order */
428 bdrv_drain_invoke(bs, false);
429 bdrv_parent_drained_end(bs, parent, ignore_bds_parents);
430 if (old_quiesce_counter == 1) {
431 aio_enable_external(bdrv_get_aio_context(bs));
435 assert(!ignore_bds_parents);
436 bs->recursive_quiesce_counter--;
437 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
438 bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents);
443 void bdrv_drained_end(BlockDriverState *bs)
445 bdrv_do_drained_end(bs, false, NULL, false);
448 void bdrv_subtree_drained_end(BlockDriverState *bs)
450 bdrv_do_drained_end(bs, true, NULL, false);
453 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
457 for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
458 bdrv_do_drained_begin(child->bs, true, child, false, true);
462 void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
466 for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
467 bdrv_do_drained_end(child->bs, true, child, false);
472 * Wait for pending requests to complete on a single BlockDriverState subtree,
473 * and suspend the block driver's internal I/O until the next request arrives.
475 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState AioContext.
478 void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
480 assert(qemu_in_coroutine());
481 bdrv_drained_begin(bs);
482 bdrv_drained_end(bs);
485 void bdrv_drain(BlockDriverState *bs)
487 bdrv_drained_begin(bs);
488 bdrv_drained_end(bs);
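/*
 * Hypothetical caller sketch (names invented, not from this file): a drained
 * section quiesces a node while its state or the graph around it is changed,
 * and the begin/end calls must always be paired:
 *
 *     bdrv_drained_begin(bs);
 *     // no new requests reach bs here and in-flight ones have completed
 *     reconfigure_node(bs);             // placeholder for the real work
 *     bdrv_drained_end(bs);
 */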
491 static void bdrv_drain_assert_idle(BlockDriverState *bs)
493 BdrvChild *child, *next;
495 assert(atomic_read(&bs->in_flight) == 0);
496 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
497 bdrv_drain_assert_idle(child->bs);
501 unsigned int bdrv_drain_all_count = 0;
503 static bool bdrv_drain_all_poll(void)
505 BlockDriverState *bs = NULL;
508 /* bdrv_drain_poll() can't make changes to the graph and we are holding the
509 * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
510 while ((bs = bdrv_next_all_states(bs))) {
511 AioContext *aio_context = bdrv_get_aio_context(bs);
512 aio_context_acquire(aio_context);
513 result |= bdrv_drain_poll(bs, false, NULL, true);
514 aio_context_release(aio_context);
521 * Wait for pending requests to complete across all BlockDriverStates
523 * This function does not flush data to disk, use bdrv_flush_all() for that
524 * after calling this function.
526 * This pauses all block jobs and disables external clients. It must
527 * be paired with bdrv_drain_all_end().
529 * NOTE: no new block jobs or BlockDriverStates can be created between
530 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
532 void bdrv_drain_all_begin(void)
534 BlockDriverState *bs = NULL;
536 if (qemu_in_coroutine()) {
537 bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true);
541 /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
542 * loop AioContext, so make sure we're in the main context. */
543 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
544 assert(bdrv_drain_all_count < INT_MAX);
545 bdrv_drain_all_count++;
547 /* Quiesce all nodes, without polling in-flight requests yet. The graph
548 * cannot change during this loop. */
549 while ((bs = bdrv_next_all_states(bs))) {
550 AioContext *aio_context = bdrv_get_aio_context(bs);
552 aio_context_acquire(aio_context);
553 bdrv_do_drained_begin(bs, false, NULL, true, false);
554 aio_context_release(aio_context);
557 /* Now poll the in-flight requests */
558 AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
560 while ((bs = bdrv_next_all_states(bs))) {
561 bdrv_drain_assert_idle(bs);
565 void bdrv_drain_all_end(void)
567 BlockDriverState *bs = NULL;
569 while ((bs = bdrv_next_all_states(bs))) {
570 AioContext *aio_context = bdrv_get_aio_context(bs);
572 aio_context_acquire(aio_context);
573 bdrv_do_drained_end(bs, false, NULL, true);
574 aio_context_release(aio_context);
577 assert(bdrv_drain_all_count > 0);
578 bdrv_drain_all_count--;
581 void bdrv_drain_all(void)
583 bdrv_drain_all_begin();
584 bdrv_drain_all_end();
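/*
 * Sketch only (assumed caller, not in this file): bdrv_drain_all() above is
 * simply the paired form of the two calls; long-running sections use them
 * directly:
 *
 *     bdrv_drain_all_begin();
 *     // no node in any AioContext accepts external requests in between
 *     bdrv_drain_all_end();
 */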
588 * Remove an active request from the tracked requests list
590 * This function should be called when a tracked request is completing.
592 static void tracked_request_end(BdrvTrackedRequest *req)
594 if (req->serialising) {
595 atomic_dec(&req->bs->serialising_in_flight);
598 qemu_co_mutex_lock(&req->bs->reqs_lock);
599 QLIST_REMOVE(req, list);
600 qemu_co_queue_restart_all(&req->wait_queue);
601 qemu_co_mutex_unlock(&req->bs->reqs_lock);
605 * Add an active request to the tracked requests list
607 static void tracked_request_begin(BdrvTrackedRequest *req,
608 BlockDriverState *bs,
611 enum BdrvTrackedRequestType type)
613 assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);
615 *req = (BdrvTrackedRequest){
620 .co = qemu_coroutine_self(),
621 .serialising = false,
622 .overlap_offset = offset,
623 .overlap_bytes = bytes,
626 qemu_co_queue_init(&req->wait_queue);
628 qemu_co_mutex_lock(&bs->reqs_lock);
629 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
630 qemu_co_mutex_unlock(&bs->reqs_lock);
633 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
635 int64_t overlap_offset = req->offset & ~(align - 1);
636 uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align) - overlap_offset;
639 if (!req->serialising) {
640 atomic_inc(&req->bs->serialising_in_flight);
641 req->serialising = true;
644 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
645 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
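/*
 * Worked example (illustrative numbers only): with align = 512, a fresh
 * request at offset 1000 with 2000 bytes gets
 *     overlap_offset = 1000 & ~511               = 512
 *     overlap_bytes  = ROUND_UP(3000, 512) - 512 = 2560
 * so the serialising window [512, 3072) covers whole 512-byte blocks around
 * the request.
 */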
648 static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req)
651 * If the request is serialising, overlap_offset and overlap_bytes are set,
652 * so we can check if the request is aligned. Otherwise, don't care and return false.
656 return req->serialising && (req->offset == req->overlap_offset) &&
657 (req->bytes == req->overlap_bytes);
661 * Round a region to cluster boundaries
663 void bdrv_round_to_clusters(BlockDriverState *bs,
664 int64_t offset, int64_t bytes,
665 int64_t *cluster_offset,
666 int64_t *cluster_bytes)
670 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
671 *cluster_offset = offset;
672 *cluster_bytes = bytes;
674 int64_t c = bdi.cluster_size;
675 *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
676 *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
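/*
 * Example (illustrative, assuming a 64 KiB cluster size): offset = 70000 and
 * bytes = 1000 round to
 *     *cluster_offset = QEMU_ALIGN_DOWN(70000, 65536)              = 65536
 *     *cluster_bytes  = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536
 * i.e. exactly the cluster that contains the request.
 */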
680 static int bdrv_get_cluster_size(BlockDriverState *bs)
685 ret = bdrv_get_info(bs, &bdi);
686 if (ret < 0 || bdi.cluster_size == 0) {
687 return bs->bl.request_alignment;
689 return bdi.cluster_size;
693 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
694 int64_t offset, uint64_t bytes)
697 if (offset >= req->overlap_offset + req->overlap_bytes) {
701 if (req->overlap_offset >= offset + bytes) {
707 void bdrv_inc_in_flight(BlockDriverState *bs)
709 atomic_inc(&bs->in_flight);
712 void bdrv_wakeup(BlockDriverState *bs)
717 void bdrv_dec_in_flight(BlockDriverState *bs)
719 atomic_dec(&bs->in_flight);
723 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
725 BlockDriverState *bs = self->bs;
726 BdrvTrackedRequest *req;
730 if (!atomic_read(&bs->serialising_in_flight)) {
736 qemu_co_mutex_lock(&bs->reqs_lock);
737 QLIST_FOREACH(req, &bs->tracked_requests, list) {
738 if (req == self || (!req->serialising && !self->serialising)) {
741 if (tracked_request_overlaps(req, self->overlap_offset,
742 self->overlap_bytes))
744 /* Hitting this means there was a reentrant request, for
745 * example, a block driver issuing nested requests. This must
746 * never happen since it means deadlock.
748 assert(qemu_coroutine_self() != req->co);
750 /* If the request is already (indirectly) waiting for us, or
751 * will wait for us as soon as it wakes up, then just go on
752 * (instead of producing a deadlock in the former case). */
753 if (!req->waiting_for) {
754 self->waiting_for = req;
755 qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
756 self->waiting_for = NULL;
763 qemu_co_mutex_unlock(&bs->reqs_lock);
769 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
772 if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
776 if (!bdrv_is_inserted(bs)) {
787 typedef struct RwCo {
793 BdrvRequestFlags flags;
796 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
800 if (!rwco->is_write) {
801 rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
802 rwco->qiov->size, rwco->qiov,
805 rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
806 rwco->qiov->size, rwco->qiov,
813 * Process a vectored synchronous request using coroutines
815 static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
816 QEMUIOVector *qiov, bool is_write,
817 BdrvRequestFlags flags)
824 .is_write = is_write,
829 if (qemu_in_coroutine()) {
830 /* Fast-path if already in coroutine context */
831 bdrv_rw_co_entry(&rwco);
833 co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
834 bdrv_coroutine_enter(child->bs, co);
835 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
841 * Process a synchronous request using coroutines
843 static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
844 int nb_sectors, bool is_write, BdrvRequestFlags flags)
846 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf,
847 nb_sectors * BDRV_SECTOR_SIZE);
849 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
853 return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
854 &qiov, is_write, flags);
857 /* return < 0 if error. See bdrv_write() for the return codes */
858 int bdrv_read(BdrvChild *child, int64_t sector_num,
859 uint8_t *buf, int nb_sectors)
861 return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
864 /* Return < 0 if error. Important errors are:
865 -EIO generic I/O error (may happen for all errors)
866 -ENOMEDIUM No media inserted.
867 -EINVAL Invalid sector number or nb_sectors
868 -EACCES Trying to write a read-only device
870 int bdrv_write(BdrvChild *child, int64_t sector_num,
871 const uint8_t *buf, int nb_sectors)
873 return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
876 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
877 int bytes, BdrvRequestFlags flags)
879 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);
881 return bdrv_prwv_co(child, offset, &qiov, true,
882 BDRV_REQ_ZERO_WRITE | flags);
886 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
887 * The operation is sped up by checking the block status and only writing
888 * zeroes to the device if they currently do not return zeroes. Optional
889 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP, BDRV_REQ_FUA).
892 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
894 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
897 int64_t target_size, bytes, offset = 0;
898 BlockDriverState *bs = child->bs;
900 target_size = bdrv_getlength(bs);
901 if (target_size < 0) {
906 bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
910 ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
914 if (ret & BDRV_BLOCK_ZERO) {
918 ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
926 int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
930 ret = bdrv_prwv_co(child, offset, qiov, false, 0);
938 int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
940 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
946 return bdrv_preadv(child, offset, &qiov);
949 int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
953 ret = bdrv_prwv_co(child, offset, qiov, true, 0);
961 int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
963 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
969 return bdrv_pwritev(child, offset, &qiov);
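/*
 * Hypothetical usage sketch (buffer and offsets invented for illustration):
 * these synchronous byte-based helpers are meant for slow-path callers such
 * as image tools, not for guest I/O:
 *
 *     uint8_t buf[4096];
 *     if (bdrv_pread(child, 0, buf, sizeof(buf)) < 0) {
 *         // handle read error
 *     }
 *     if (bdrv_pwrite(child, 0, buf, sizeof(buf)) < 0) {
 *         // handle write error
 *     }
 */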
973 * Writes to the file and ensures that no writes are reordered across this
974 * request (acts as a barrier)
976 * Returns 0 on success, -errno in error cases.
978 int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
979 const void *buf, int count)
983 ret = bdrv_pwrite(child, offset, buf, count);
988 ret = bdrv_flush(child->bs);
996 typedef struct CoroutineIOCompletion {
997 Coroutine *coroutine;
999 } CoroutineIOCompletion;
1001 static void bdrv_co_io_em_complete(void *opaque, int ret)
1003 CoroutineIOCompletion *co = opaque;
1006 aio_co_wake(co->coroutine);
1009 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
1010 uint64_t offset, uint64_t bytes,
1011 QEMUIOVector *qiov, int flags)
1013 BlockDriver *drv = bs->drv;
1015 unsigned int nb_sectors;
1017 assert(!(flags & ~BDRV_REQ_MASK));
1018 assert(!(flags & BDRV_REQ_NO_FALLBACK));
1024 if (drv->bdrv_co_preadv) {
1025 return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
1028 if (drv->bdrv_aio_preadv) {
1030 CoroutineIOCompletion co = {
1031 .coroutine = qemu_coroutine_self(),
1034 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
1035 bdrv_co_io_em_complete, &co);
1039 qemu_coroutine_yield();
1044 sector_num = offset >> BDRV_SECTOR_BITS;
1045 nb_sectors = bytes >> BDRV_SECTOR_BITS;
1047 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1048 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1049 assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
1050 assert(drv->bdrv_co_readv);
1052 return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1055 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
1056 uint64_t offset, uint64_t bytes,
1057 QEMUIOVector *qiov, int flags)
1059 BlockDriver *drv = bs->drv;
1061 unsigned int nb_sectors;
1064 assert(!(flags & ~BDRV_REQ_MASK));
1065 assert(!(flags & BDRV_REQ_NO_FALLBACK));
1071 if (drv->bdrv_co_pwritev) {
1072 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
1073 flags & bs->supported_write_flags);
1074 flags &= ~bs->supported_write_flags;
1078 if (drv->bdrv_aio_pwritev) {
1080 CoroutineIOCompletion co = {
1081 .coroutine = qemu_coroutine_self(),
1084 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
1085 flags & bs->supported_write_flags,
1086 bdrv_co_io_em_complete, &co);
1087 flags &= ~bs->supported_write_flags;
1091 qemu_coroutine_yield();
1097 sector_num = offset >> BDRV_SECTOR_BITS;
1098 nb_sectors = bytes >> BDRV_SECTOR_BITS;
1100 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1101 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1102 assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
1104 assert(drv->bdrv_co_writev);
1105 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
1106 flags & bs->supported_write_flags);
1107 flags &= ~bs->supported_write_flags;
1110 if (ret == 0 && (flags & BDRV_REQ_FUA)) {
1111 ret = bdrv_co_flush(bs);
1117 static int coroutine_fn
1118 bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
1119 uint64_t bytes, QEMUIOVector *qiov)
1121 BlockDriver *drv = bs->drv;
1127 if (!drv->bdrv_co_pwritev_compressed) {
1131 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
1134 static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
1135 int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
1137 BlockDriverState *bs = child->bs;
1139 /* Perform I/O through a temporary buffer so that users who scribble over
1140 * their read buffer while the operation is in progress do not end up
1141 * modifying the image file. This is critical for zero-copy guest I/O
1142 * where anything might happen inside guest memory.
1144 void *bounce_buffer;
1146 BlockDriver *drv = bs->drv;
1147 QEMUIOVector local_qiov;
1148 int64_t cluster_offset;
1149 int64_t cluster_bytes;
1152 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1153 BDRV_REQUEST_MAX_BYTES);
1154 unsigned int progress = 0;
1160 /* FIXME We cannot require callers to have write permissions when all they
1161 * are doing is a read request. If we did things right, write permissions
1162 * would be obtained anyway, but internally by the copy-on-read code. As
1163 * long as it is implemented here rather than in a separate filter driver,
1164 * the copy-on-read code doesn't have its own BdrvChild, however, for which
1165 * it could request permissions. Therefore we have to bypass the permission
1166 * system for the moment. */
1167 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1169 /* Cover entire cluster so no additional backing file I/O is required when
1170 * allocating a cluster in the image file. Note that this value may exceed
1171 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1172 * is one reason we loop rather than doing it all at once.
1174 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
1175 skip_bytes = offset - cluster_offset;
1177 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1178 cluster_offset, cluster_bytes);
1180 bounce_buffer = qemu_try_blockalign(bs,
1181 MIN(MIN(max_transfer, cluster_bytes),
1182 MAX_BOUNCE_BUFFER));
1183 if (bounce_buffer == NULL) {
1188 while (cluster_bytes) {
1191 ret = bdrv_is_allocated(bs, cluster_offset,
1192 MIN(cluster_bytes, max_transfer), &pnum);
1194 /* Safe to treat errors in querying allocation as if
1195 * unallocated; we'll probably fail again soon on the
1196 * read, but at least that will set a decent errno.
1198 pnum = MIN(cluster_bytes, max_transfer);
1201 /* Stop at EOF if the image ends in the middle of the cluster */
1202 if (ret == 0 && pnum == 0) {
1203 assert(progress >= bytes);
1207 assert(skip_bytes < pnum);
1210 /* Must copy-on-read; use the bounce buffer */
1211 pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
1212 qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1214 ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1220 bdrv_debug_event(bs, BLKDBG_COR_WRITE);
1221 if (drv->bdrv_co_pwrite_zeroes &&
1222 buffer_is_zero(bounce_buffer, pnum)) {
1223 /* FIXME: Should we (perhaps conditionally) be setting
1224 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1225 * that still correctly reads as zero? */
1226 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
1227 BDRV_REQ_WRITE_UNCHANGED);
1229 /* This does not change the data on the disk, so it is not
1230 * necessary to flush even in cache=writethrough mode.
1232 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1234 BDRV_REQ_WRITE_UNCHANGED);
1238 /* It might be okay to ignore write errors for guest
1239 * requests. If this is a deliberate copy-on-read
1240 * then we don't want to ignore the error. Simply
1241 * report it in all cases.
1246 qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes,
1249 /* Read directly into the destination */
1250 qemu_iovec_init(&local_qiov, qiov->niov);
1251 qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes);
1252 ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size,
1254 qemu_iovec_destroy(&local_qiov);
1260 cluster_offset += pnum;
1261 cluster_bytes -= pnum;
1262 progress += pnum - skip_bytes;
1268 qemu_vfree(bounce_buffer);
1273 * Forwards an already correctly aligned request to the BlockDriver. This
1274 * handles copy on read, zeroing after EOF, and fragmentation of large
1275 * reads; any other features must be implemented by the caller.
1277 static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
1278 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1279 int64_t align, QEMUIOVector *qiov, int flags)
1281 BlockDriverState *bs = child->bs;
1282 int64_t total_bytes, max_bytes;
1284 uint64_t bytes_remaining = bytes;
1287 assert(is_power_of_2(align));
1288 assert((offset & (align - 1)) == 0);
1289 assert((bytes & (align - 1)) == 0);
1290 assert(!qiov || bytes == qiov->size);
1291 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1292 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1295 /* TODO: We would need a per-BDS .supported_read_flags and
1296 * potential fallback support, if we ever implement any read flags
1297 * to pass through to drivers. For now, there aren't any
1298 * passthrough flags. */
1299 assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
1301 /* Handle Copy on Read and associated serialisation */
1302 if (flags & BDRV_REQ_COPY_ON_READ) {
1303 /* If we touch the same cluster it counts as an overlap. This
1304 * guarantees that allocating writes will be serialized and not race
1305 * with each other for the same cluster. For example, in copy-on-read
1306 * it ensures that the CoR read and write operations are atomic and
1307 * guest writes cannot interleave between them. */
1308 mark_request_serialising(req, bdrv_get_cluster_size(bs));
1311 /* BDRV_REQ_SERIALISING is only for write operations */
1312 assert(!(flags & BDRV_REQ_SERIALISING));
1314 if (!(flags & BDRV_REQ_NO_SERIALISING)) {
1315 wait_serialising_requests(req);
1318 if (flags & BDRV_REQ_COPY_ON_READ) {
1321 ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
1326 if (!ret || pnum != bytes) {
1327 ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
1332 /* Forward the request to the BlockDriver, possibly fragmenting it */
1333 total_bytes = bdrv_getlength(bs);
1334 if (total_bytes < 0) {
1339 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1340 if (bytes <= max_bytes && bytes <= max_transfer) {
1341 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
1345 while (bytes_remaining) {
1349 QEMUIOVector local_qiov;
1351 num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
1353 qemu_iovec_init(&local_qiov, qiov->niov);
1354 qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1356 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1357 num, &local_qiov, 0);
1359 qemu_iovec_destroy(&local_qiov);
1361 num = bytes_remaining;
1362 ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
1368 bytes_remaining -= num;
1372 return ret < 0 ? ret : 0;
1376 * Handle a read request in coroutine context
1378 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1379 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1380 BdrvRequestFlags flags)
1382 BlockDriverState *bs = child->bs;
1383 BlockDriver *drv = bs->drv;
1384 BdrvTrackedRequest req;
1386 uint64_t align = bs->bl.request_alignment;
1387 uint8_t *head_buf = NULL;
1388 uint8_t *tail_buf = NULL;
1389 QEMUIOVector local_qiov;
1390 bool use_local_qiov = false;
1393 trace_bdrv_co_preadv(child->bs, offset, bytes, flags);
1399 ret = bdrv_check_byte_request(bs, offset, bytes);
1404 bdrv_inc_in_flight(bs);
1406 /* Don't do copy-on-read if we read data before a write operation */
1407 if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
1408 flags |= BDRV_REQ_COPY_ON_READ;
1411 /* Align read if necessary by padding qiov */
1412 if (offset & (align - 1)) {
1413 head_buf = qemu_blockalign(bs, align);
1414 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1415 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1416 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1417 use_local_qiov = true;
1419 bytes += offset & (align - 1);
1420 offset = offset & ~(align - 1);
1423 if ((offset + bytes) & (align - 1)) {
1424 if (!use_local_qiov) {
1425 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1426 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1427 use_local_qiov = true;
1429 tail_buf = qemu_blockalign(bs, align);
1430 qemu_iovec_add(&local_qiov, tail_buf,
1431 align - ((offset + bytes) & (align - 1)));
1433 bytes = ROUND_UP(bytes, align);
1436 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1437 ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
1438 use_local_qiov ? &local_qiov : qiov,
1440 tracked_request_end(&req);
1441 bdrv_dec_in_flight(bs);
1443 if (use_local_qiov) {
1444 qemu_iovec_destroy(&local_qiov);
1445 qemu_vfree(head_buf);
1446 qemu_vfree(tail_buf);
1452 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1453 int64_t offset, int bytes, BdrvRequestFlags flags)
1455 BlockDriver *drv = bs->drv;
1459 bool need_flush = false;
1463 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1464 int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1465 bs->bl.request_alignment);
1466 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1472 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1476 assert(alignment % bs->bl.request_alignment == 0);
1477 head = offset % alignment;
1478 tail = (offset + bytes) % alignment;
1479 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1480 assert(max_write_zeroes >= bs->bl.request_alignment);
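/*
 * Worked example (numbers invented for illustration): with alignment = 4096,
 * a zero request at offset 4097 for 10000 bytes has
 *     head = 4097 % 4096           = 1
 *     tail = (4097 + 10000) % 4096 = 1809
 * so the loop below issues a short unaligned head piece, aligned middle
 * pieces, and a short unaligned tail piece.
 */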
1482 while (bytes > 0 && !ret) {
1485 /* Align request. Block drivers can expect the "bulk" of the request
1486 * to be aligned, and that unaligned requests do not cross cluster boundaries.
1490 /* Make a small request up to the first aligned sector. For
1491 * convenience, limit this request to max_transfer even if
1492 * we don't need to fall back to writes. */
1493 num = MIN(MIN(bytes, max_transfer), alignment - head);
1494 head = (head + num) % alignment;
1495 assert(num < max_write_zeroes);
1496 } else if (tail && num > alignment) {
1497 /* Shorten the request to the last aligned sector. */
1501 /* limit request size */
1502 if (num > max_write_zeroes) {
1503 num = max_write_zeroes;
1507 /* First try the efficient write zeroes operation */
1508 if (drv->bdrv_co_pwrite_zeroes) {
1509 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1510 flags & bs->supported_zero_flags);
1511 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1512 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1516 assert(!bs->supported_zero_flags);
1519 if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
1520 /* Fall back to bounce buffer if write zeroes is unsupported */
1521 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1523 if ((flags & BDRV_REQ_FUA) &&
1524 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1525 /* No need for bdrv_driver_pwritev() to do a fallback
1526 * flush on each chunk; use just one at the end */
1527 write_flags &= ~BDRV_REQ_FUA;
1530 num = MIN(num, max_transfer);
1532 buf = qemu_try_blockalign0(bs, num);
1538 qemu_iovec_init_buf(&qiov, buf, num);
1540 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
1542 /* Keep bounce buffer around if it is big enough for all
1543 * future requests. */
1545 if (num < max_transfer) {
1556 if (ret == 0 && need_flush) {
1557 ret = bdrv_co_flush(bs);
1563 static inline int coroutine_fn
1564 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
1565 BdrvTrackedRequest *req, int flags)
1567 BlockDriverState *bs = child->bs;
1569 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1571 if (bs->read_only) {
1575 /* BDRV_REQ_NO_SERIALISING is only for read operations */
1576 assert(!(flags & BDRV_REQ_NO_SERIALISING));
1577 assert(!(bs->open_flags & BDRV_O_INACTIVE));
1578 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1579 assert(!(flags & ~BDRV_REQ_MASK));
1581 if (flags & BDRV_REQ_SERIALISING) {
1582 mark_request_serialising(req, bdrv_get_cluster_size(bs));
1585 waited = wait_serialising_requests(req);
1587 assert(!waited || !req->serialising ||
1588 is_request_serialising_and_aligned(req));
1589 assert(req->overlap_offset <= offset);
1590 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1591 assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
1593 switch (req->type) {
1594 case BDRV_TRACKED_WRITE:
1595 case BDRV_TRACKED_DISCARD:
1596 if (flags & BDRV_REQ_WRITE_UNCHANGED) {
1597 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1599 assert(child->perm & BLK_PERM_WRITE);
1601 return notifier_with_return_list_notify(&bs->before_write_notifiers,
1603 case BDRV_TRACKED_TRUNCATE:
1604 assert(child->perm & BLK_PERM_RESIZE);
1611 static inline void coroutine_fn
1612 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
1613 BdrvTrackedRequest *req, int ret)
1615 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1616 BlockDriverState *bs = child->bs;
1618 atomic_inc(&bs->write_gen);
1621 * Discard cannot extend the image, but in error handling cases, such as
1622 * when reverting a qcow2 cluster allocation, the discarded range can extend
1623 * past the end of the image file, so we cannot assert about BDRV_TRACKED_DISCARD
1624 * here. Instead, just skip it, since semantically a discard request
1625 * beyond EOF cannot expand the image anyway.
1628 (req->type == BDRV_TRACKED_TRUNCATE ||
1629 end_sector > bs->total_sectors) &&
1630 req->type != BDRV_TRACKED_DISCARD) {
1631 bs->total_sectors = end_sector;
1632 bdrv_parent_cb_resize(bs);
1633 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
1636 switch (req->type) {
1637 case BDRV_TRACKED_WRITE:
1638 stat64_max(&bs->wr_highest_offset, offset + bytes);
1639 /* fall through, to set dirty bits */
1640 case BDRV_TRACKED_DISCARD:
1641 bdrv_set_dirty(bs, offset, bytes);
1650 * Forwards an already correctly aligned write request to the BlockDriver,
1651 * after possibly fragmenting it.
1653 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
1654 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1655 int64_t align, QEMUIOVector *qiov, int flags)
1657 BlockDriverState *bs = child->bs;
1658 BlockDriver *drv = bs->drv;
1661 uint64_t bytes_remaining = bytes;
1668 if (bdrv_has_readonly_bitmaps(bs)) {
1672 assert(is_power_of_2(align));
1673 assert((offset & (align - 1)) == 0);
1674 assert((bytes & (align - 1)) == 0);
1675 assert(!qiov || bytes == qiov->size);
1676 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1679 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
1681 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1682 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
1683 qemu_iovec_is_zero(qiov)) {
1684 flags |= BDRV_REQ_ZERO_WRITE;
1685 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1686 flags |= BDRV_REQ_MAY_UNMAP;
1691 /* Do nothing, write notifier decided to fail this request */
1692 } else if (flags & BDRV_REQ_ZERO_WRITE) {
1693 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1694 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
1695 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
1696 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
1697 } else if (bytes <= max_transfer) {
1698 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1699 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
1701 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1702 while (bytes_remaining) {
1703 int num = MIN(bytes_remaining, max_transfer);
1704 QEMUIOVector local_qiov;
1705 int local_flags = flags;
1708 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
1709 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1710 /* If FUA is going to be emulated by flush, we only
1711 * need to flush on the last iteration */
1712 local_flags &= ~BDRV_REQ_FUA;
1714 qemu_iovec_init(&local_qiov, qiov->niov);
1715 qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1717 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1718 num, &local_qiov, local_flags);
1719 qemu_iovec_destroy(&local_qiov);
1723 bytes_remaining -= num;
1726 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1731 bdrv_co_write_req_finish(child, offset, bytes, req, ret);
1736 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
1739 BdrvRequestFlags flags,
1740 BdrvTrackedRequest *req)
1742 BlockDriverState *bs = child->bs;
1743 uint8_t *buf = NULL;
1744 QEMUIOVector local_qiov;
1745 uint64_t align = bs->bl.request_alignment;
1746 unsigned int head_padding_bytes, tail_padding_bytes;
1749 head_padding_bytes = offset & (align - 1);
1750 tail_padding_bytes = (align - (offset + bytes)) & (align - 1);
1753 assert(flags & BDRV_REQ_ZERO_WRITE);
1754 if (head_padding_bytes || tail_padding_bytes) {
1755 buf = qemu_blockalign(bs, align);
1756 qemu_iovec_init_buf(&local_qiov, buf, align);
1758 if (head_padding_bytes) {
1759 uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1761 /* RMW the unaligned part before head. */
1762 mark_request_serialising(req, align);
1763 wait_serialising_requests(req);
1764 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1765 ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
1766 align, &local_qiov, 0);
1770 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1772 memset(buf + head_padding_bytes, 0, zero_bytes);
1773 ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
1775 flags & ~BDRV_REQ_ZERO_WRITE);
1779 offset += zero_bytes;
1780 bytes -= zero_bytes;
1783 assert(!bytes || (offset & (align - 1)) == 0);
1784 if (bytes >= align) {
1785 /* Write the aligned part in the middle. */
1786 uint64_t aligned_bytes = bytes & ~(align - 1);
1787 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
1792 bytes -= aligned_bytes;
1793 offset += aligned_bytes;
1796 assert(!bytes || (offset & (align - 1)) == 0);
1798 assert(align == tail_padding_bytes + bytes);
1799 /* RMW the unaligned part after tail. */
1800 mark_request_serialising(req, align);
1801 wait_serialising_requests(req);
1802 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1803 ret = bdrv_aligned_preadv(child, req, offset, align,
1804 align, &local_qiov, 0);
1808 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1810 memset(buf, 0, bytes);
1811 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
1812 &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1821 * Handle a write request in coroutine context
1823 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
1824 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1825 BdrvRequestFlags flags)
1827 BlockDriverState *bs = child->bs;
1828 BdrvTrackedRequest req;
1829 uint64_t align = bs->bl.request_alignment;
1830 uint8_t *head_buf = NULL;
1831 uint8_t *tail_buf = NULL;
1832 QEMUIOVector local_qiov;
1833 bool use_local_qiov = false;
1836 trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
1842 ret = bdrv_check_byte_request(bs, offset, bytes);
1847 bdrv_inc_in_flight(bs);
1849 * Align write if necessary by performing a read-modify-write cycle.
1850 * Pad qiov with the read parts and be sure to have a tracked request not
1851 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1853 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1855 if (flags & BDRV_REQ_ZERO_WRITE) {
1856 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
1860 if (offset & (align - 1)) {
1861 QEMUIOVector head_qiov;
1863 mark_request_serialising(&req, align);
1864 wait_serialising_requests(&req);
1866 head_buf = qemu_blockalign(bs, align);
1867 qemu_iovec_init_buf(&head_qiov, head_buf, align);
1869 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1870 ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
1871 align, &head_qiov, 0);
1875 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1877 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1878 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1879 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1880 use_local_qiov = true;
1882 bytes += offset & (align - 1);
1883 offset = offset & ~(align - 1);
1885 /* We have read the tail already if the request is smaller
1886 * than one aligned block.
1888 if (bytes < align) {
1889 qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
1894 if ((offset + bytes) & (align - 1)) {
1895 QEMUIOVector tail_qiov;
1899 mark_request_serialising(&req, align);
1900 waited = wait_serialising_requests(&req);
1901 assert(!waited || !use_local_qiov);
1903 tail_buf = qemu_blockalign(bs, align);
1904 qemu_iovec_init_buf(&tail_qiov, tail_buf, align);
1906 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1907 ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
1908 align, align, &tail_qiov, 0);
1912 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1914 if (!use_local_qiov) {
1915 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1916 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1917 use_local_qiov = true;
1920 tail_bytes = (offset + bytes) & (align - 1);
1921 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1923 bytes = ROUND_UP(bytes, align);
1926 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
1927 use_local_qiov ? &local_qiov : qiov,
1932 if (use_local_qiov) {
1933 qemu_iovec_destroy(&local_qiov);
1935 qemu_vfree(head_buf);
1936 qemu_vfree(tail_buf);
1938 tracked_request_end(&req);
1939 bdrv_dec_in_flight(bs);
1943 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
1944 int bytes, BdrvRequestFlags flags)
1946 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
1948 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
1949 flags &= ~BDRV_REQ_MAY_UNMAP;
1952 return bdrv_co_pwritev(child, offset, bytes, NULL,
1953 BDRV_REQ_ZERO_WRITE | flags);
1957 * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend or not.
1959 int bdrv_flush_all(void)
1961 BdrvNextIterator it;
1962 BlockDriverState *bs = NULL;
1965 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
1966 AioContext *aio_context = bdrv_get_aio_context(bs);
1969 aio_context_acquire(aio_context);
1970 ret = bdrv_flush(bs);
1971 if (ret < 0 && !result) {
1974 aio_context_release(aio_context);
1981 typedef struct BdrvCoBlockStatusData {
1982 BlockDriverState *bs;
1983 BlockDriverState *base;
1989 BlockDriverState **file;
1992 } BdrvCoBlockStatusData;
1994 int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
2000 BlockDriverState **file)
2002 assert(bs->file && bs->file->bs);
2005 *file = bs->file->bs;
2006 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2009 int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
2015 BlockDriverState **file)
2017 assert(bs->backing && bs->backing->bs);
2020 *file = bs->backing->bs;
2021 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2025 * Returns the allocation status of the specified sectors.
2026 * Drivers not implementing the functionality are assumed to not support
2027 * backing files, hence all their sectors are reported as allocated.
2029 * If 'want_zero' is true, the caller is querying for mapping
2030 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2031 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2032 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2034 * If 'offset' is beyond the end of the disk image the return value is
2035 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2037 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2038 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2039 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2041 * 'pnum' is set to the number of bytes (including and immediately
2042 * following the specified offset) that are easily known to be in the
2043 * same allocated/unallocated state. Note that a second call starting
2044 * at the original offset plus returned pnum may have the same status.
2045 * The returned value is non-zero on success except at end-of-file.
2047 * Returns negative errno on failure. Otherwise, if the
2048 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2049 * set to the host mapping and BDS corresponding to the guest offset.
2051 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2053 int64_t offset, int64_t bytes,
2054 int64_t *pnum, int64_t *map,
2055 BlockDriverState **file)
2058 int64_t n; /* bytes */
2060 int64_t local_map = 0;
2061 BlockDriverState *local_file = NULL;
2062 int64_t aligned_offset, aligned_bytes;
2067 total_size = bdrv_getlength(bs);
2068 if (total_size < 0) {
2073 if (offset >= total_size) {
2074 ret = BDRV_BLOCK_EOF;
2082 n = total_size - offset;
2087 /* Must be non-NULL or bdrv_getlength() would have failed */
2089 if (!bs->drv->bdrv_co_block_status) {
2091 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2092 if (offset + bytes == total_size) {
2093 ret |= BDRV_BLOCK_EOF;
2095 if (bs->drv->protocol_name) {
2096 ret |= BDRV_BLOCK_OFFSET_VALID;
2103 bdrv_inc_in_flight(bs);
2105 /* Round out to request_alignment boundaries */
2106 align = bs->bl.request_alignment;
2107 aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2108 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2110 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2111 aligned_bytes, pnum, &local_map,
2119 * The driver's result must be a non-zero multiple of request_alignment.
2120 * Clamp pnum and adjust map to original request.
2122 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2123 align > offset - aligned_offset);
2124 *pnum -= offset - aligned_offset;
2125 if (*pnum > bytes) {
2128 if (ret & BDRV_BLOCK_OFFSET_VALID) {
2129 local_map += offset - aligned_offset;
2132 if (ret & BDRV_BLOCK_RAW) {
2133 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2134 ret = bdrv_co_block_status(local_file, want_zero, local_map,
2135 *pnum, pnum, &local_map, &local_file);
2139 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2140 ret |= BDRV_BLOCK_ALLOCATED;
2141 } else if (want_zero) {
2142 if (bdrv_unallocated_blocks_are_zero(bs)) {
2143 ret |= BDRV_BLOCK_ZERO;
2144 } else if (bs->backing) {
2145 BlockDriverState *bs2 = bs->backing->bs;
2146 int64_t size2 = bdrv_getlength(bs2);
2148 if (size2 >= 0 && offset >= size2) {
2149 ret |= BDRV_BLOCK_ZERO;
2154 if (want_zero && local_file && local_file != bs &&
2155 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2156 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2160 ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2161 *pnum, &file_pnum, NULL, NULL);
2163 /* Ignore errors. This is just providing extra information; it
2164 * is useful but not necessary.
2166 if (ret2 & BDRV_BLOCK_EOF &&
2167 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2169 * It is valid for the format block driver to read
2170 * beyond the end of the underlying file's current
2171 * size; such areas read as zero.
2173 ret |= BDRV_BLOCK_ZERO;
2175 /* Limit request to the range reported by the protocol driver */
2177 ret |= (ret2 & BDRV_BLOCK_ZERO);
2183 bdrv_dec_in_flight(bs);
2184 if (ret >= 0 && offset + *pnum == total_size) {
2185 ret |= BDRV_BLOCK_EOF;
2197 static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2198 BlockDriverState *base,
2204 BlockDriverState **file)
2206 BlockDriverState *p;
2211 for (p = bs; p != base; p = backing_bs(p)) {
2212 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2217 if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
2219 * Reading beyond the end of the file continues to read
2220 * zeroes, but we can only widen the result to the
2221 * unallocated length we learned from an earlier iteration.
2226 if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
2229 /* [offset, pnum] unallocated on this layer, which could be only
2230 * the first part of [offset, bytes]. */
2231 bytes = MIN(bytes, *pnum);
2237 /* Coroutine wrapper for bdrv_block_status_above() */
2238 static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
2240 BdrvCoBlockStatusData *data = opaque;
2242 data->ret = bdrv_co_block_status_above(data->bs, data->base,
2244 data->offset, data->bytes,
2245 data->pnum, data->map, data->file);
2251 * Synchronous wrapper around bdrv_co_block_status_above().
2253 * See bdrv_co_block_status_above() for details.
2255 static int bdrv_common_block_status_above(BlockDriverState *bs,
2256 BlockDriverState *base,
2257 bool want_zero, int64_t offset,
2258 int64_t bytes, int64_t *pnum,
2260 BlockDriverState **file)
2263 BdrvCoBlockStatusData data = {
2266 .want_zero = want_zero,
2275 if (qemu_in_coroutine()) {
2276 /* Fast-path if already in coroutine context */
2277 bdrv_block_status_above_co_entry(&data);
2279 co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
2280 bdrv_coroutine_enter(bs, co);
2281 BDRV_POLL_WHILE(bs, !data.done);
2286 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2287 int64_t offset, int64_t bytes, int64_t *pnum,
2288 int64_t *map, BlockDriverState **file)
2290 return bdrv_common_block_status_above(bs, base, true, offset, bytes,
2294 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2295 int64_t *pnum, int64_t *map, BlockDriverState **file)
2297 return bdrv_block_status_above(bs, backing_bs(bs),
2298 offset, bytes, pnum, map, file);
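/*
 * Caller sketch (hedged example, variable names invented): walking an image
 * and classifying each extent by its reported status:
 *
 *     int64_t len = bdrv_getlength(bs);
 *     int64_t offset = 0, pnum;
 *     while (offset < len) {
 *         int ret = bdrv_block_status(bs, offset, len - offset, &pnum,
 *                                     NULL, NULL);
 *         if (ret < 0 || pnum == 0) {
 *             break;
 *         }
 *         // ret & BDRV_BLOCK_DATA / BDRV_BLOCK_ZERO describe
 *         // the range [offset, offset + pnum)
 *         offset += pnum;
 *     }
 */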
2301 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2302 int64_t bytes, int64_t *pnum)
2307 ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
2308 bytes, pnum ? pnum : &dummy, NULL,
2313 return !!(ret & BDRV_BLOCK_ALLOCATED);
2317 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2319 * Return true if (a prefix of) the given range is allocated in any image
2320 * between BASE and TOP (inclusive). BASE can be NULL to check if the given
2321 * offset is allocated in any image of the chain. Return false otherwise,
2322 * or negative errno on failure.
2324 * 'pnum' is set to the number of bytes (including and immediately
2325 * following the specified offset) that are known to be in the same
2326 * allocated/unallocated state. Note that a subsequent call starting
2327 * at 'offset + *pnum' may return the same allocation status (in other
2328 * words, the result is not necessarily the maximum possible range);
2329 * but 'pnum' will only be 0 when end of file is reached.
2332 int bdrv_is_allocated_above(BlockDriverState *top,
2333 BlockDriverState *base,
2334 int64_t offset, int64_t bytes, int64_t *pnum)
2336 BlockDriverState *intermediate;
2341 while (intermediate && intermediate != base) {
2345 ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
2354 size_inter = bdrv_getlength(intermediate);
2355 if (size_inter < 0) {
2358 if (n > pnum_inter &&
2359 (intermediate == top || offset + pnum_inter < size_inter)) {
2363 intermediate = backing_bs(intermediate);
2370 typedef struct BdrvVmstateCo {
2371 BlockDriverState *bs;
2378 static int coroutine_fn
2379 bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2382 BlockDriver *drv = bs->drv;
2385 bdrv_inc_in_flight(bs);
2389 } else if (drv->bdrv_load_vmstate) {
2391 ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2393 ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2395 } else if (bs->file) {
2396 ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
2399 bdrv_dec_in_flight(bs);
2403 static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
2405 BdrvVmstateCo *co = opaque;
2406 co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
2411 bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2414 if (qemu_in_coroutine()) {
2415 return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
2417 BdrvVmstateCo data = {
2422 .ret = -EINPROGRESS,
2424 Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
2426 bdrv_coroutine_enter(bs, co);
2427 BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
2432 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2433 int64_t pos, int size)
2435 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2438 ret = bdrv_writev_vmstate(bs, &qiov, pos);
2446 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2448 return bdrv_rw_vmstate(bs, qiov, pos, false);
2451 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2452 int64_t pos, int size)
2454 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2457 ret = bdrv_readv_vmstate(bs, &qiov, pos);
2465 int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2467 return bdrv_rw_vmstate(bs, qiov, pos, true);
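/*
 * Illustration (not part of the original code): the vmstate helpers above
 * are what migration code uses to stash VM state into an image whose format
 * supports it (e.g. qcow2 for savevm).  A hypothetical round trip:
 *
 *     uint8_t buf[512];
 *     if (bdrv_save_vmstate(bs, buf, 0, sizeof(buf)) < 0) {
 *         // saving not supported by this driver or chain
 *     }
 *     ...
 *     if (bdrv_load_vmstate(bs, buf, 0, sizeof(buf)) < 0) {
 *         // loading failed
 *     }
 *
 * Both paths funnel through bdrv_rw_vmstate(), which either calls the
 * driver hook or recurses into bs->file.
 */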
2470 /**************************************************************/
2473 void bdrv_aio_cancel(BlockAIOCB *acb)
2476 bdrv_aio_cancel_async(acb);
2477 while (acb->refcnt > 1) {
2478 if (acb->aiocb_info->get_aio_context) {
2479 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2480 } else if (acb->bs) {
2481 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2482 * assert that we're not using an I/O thread. Thread-safe
2483 * code should use bdrv_aio_cancel_async exclusively.
2485 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2486 aio_poll(bdrv_get_aio_context(acb->bs), true);
2491 qemu_aio_unref(acb);
2494 /* Async version of aio cancel. The caller is not blocked if the acb implements
2495 * cancel_async; otherwise we do nothing and let the request complete normally.
2496 * In either case the completion callback must be called. */
2497 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2499 if (acb->aiocb_info->cancel_async) {
2500 acb->aiocb_info->cancel_async(acb);
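/*
 * Illustration (added commentary, not in the original): code that runs in an
 * IOThread should prefer the async variant and rely on the completion
 * callback, e.g.:
 *
 *     bdrv_aio_cancel_async(acb);
 *     // the completion callback still fires, typically with -ECANCELED if
 *     // the request was cancelled in time, or with the normal result if it
 *     // had already finished
 *
 * The synchronous bdrv_aio_cancel() above additionally polls until the acb's
 * reference count drops, which is why its fallback path asserts that it is
 * running in the main AioContext.
 */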
2504 /**************************************************************/
2505 /* Coroutine block device emulation */
2507 typedef struct FlushCo {
2508 BlockDriverState *bs;
2513 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2515 FlushCo *rwco = opaque;
2517 rwco->ret = bdrv_co_flush(rwco->bs);
2521 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2526 bdrv_inc_in_flight(bs);
2528 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2533 qemu_co_mutex_lock(&bs->reqs_lock);
2534 current_gen = atomic_read(&bs->write_gen);
2536 /* Wait until any previous flushes are completed */
2537 while (bs->active_flush_req) {
2538 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2541 /* Flushes reach this point in nondecreasing current_gen order. */
2542 bs->active_flush_req = true;
2543 qemu_co_mutex_unlock(&bs->reqs_lock);
2545 /* Write back all layers by calling one driver function */
2546 if (bs->drv->bdrv_co_flush) {
2547 ret = bs->drv->bdrv_co_flush(bs);
2551 /* Write back cached data to the OS even with cache=unsafe */
2552 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2553 if (bs->drv->bdrv_co_flush_to_os) {
2554 ret = bs->drv->bdrv_co_flush_to_os(bs);
2560 /* But don't actually force it to the disk with cache=unsafe */
2561 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2565 /* Check if we really need to flush anything */
2566 if (bs->flushed_gen == current_gen) {
2570 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2572 /* bs->drv->bdrv_co_flush() might have ejected the BDS
2573 * (even in case of apparent success) */
2577 if (bs->drv->bdrv_co_flush_to_disk) {
2578 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2579 } else if (bs->drv->bdrv_aio_flush) {
2581 CoroutineIOCompletion co = {
2582 .coroutine = qemu_coroutine_self(),
2585 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2589 qemu_coroutine_yield();
2594 * Some block drivers always operate in either writethrough or unsafe
2595 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2596 * know how the server works (because the behaviour is hardcoded or
2597 * depends on server-side configuration), so we can't ensure that
2598 * everything is safe on disk. Returning an error doesn't work because
2599 * that would break guests even if the server operates in writethrough
2602 * Let's hope the user knows what they're doing.
2611 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2612 * in the case of cache=unsafe, so there are no useless flushes.
2615 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2617 /* Notify any pending flushes that we have completed */
2619 bs->flushed_gen = current_gen;
2622 qemu_co_mutex_lock(&bs->reqs_lock);
2623 bs->active_flush_req = false;
2624 /* Return value is ignored - it's ok if wait queue is empty */
2625 qemu_co_queue_next(&bs->flush_queue);
2626 qemu_co_mutex_unlock(&bs->reqs_lock);
2629 bdrv_dec_in_flight(bs);
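/*
 * Note (added commentary, not in the original): the flush path is serialised
 * per BDS via active_flush_req/flush_queue, and it is skipped entirely when
 * flushed_gen already matches the write generation sampled under reqs_lock,
 * so back-to-back flushes with no intervening writes collapse into a single
 * driver callout.
 */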
2633 int bdrv_flush(BlockDriverState *bs)
2636 FlushCo flush_co = {
2641 if (qemu_in_coroutine()) {
2642 /* Fast-path if already in coroutine context */
2643 bdrv_flush_co_entry(&flush_co);
2645 co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
2646 bdrv_coroutine_enter(bs, co);
2647 BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
2650 return flush_co.ret;
2653 typedef struct DiscardCo {
2659 static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
2661 DiscardCo *rwco = opaque;
2663 rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
2667 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int bytes)
2669 BdrvTrackedRequest req;
2670 int max_pdiscard, ret;
2671 int head, tail, align;
2672 BlockDriverState *bs = child->bs;
2674 if (!bs || !bs->drv) {
2678 if (bdrv_has_readonly_bitmaps(bs)) {
2682 ret = bdrv_check_byte_request(bs, offset, bytes);
2687 /* Do nothing if disabled. */
2688 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2692 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
2696 /* Discard is advisory, but some devices track and coalesce
2697 * unaligned requests, so we must pass everything down rather than
2698 * round here. Still, most devices will just silently ignore
2699 * unaligned requests (by returning -ENOTSUP), so we must fragment
2700 * the request accordingly. */
2701 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2702 assert(align % bs->bl.request_alignment == 0);
2703 head = offset % align;
2704 tail = (offset + bytes) % align;
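/* Worked example (added commentary): with align = 64 KiB, a discard of
 * offset = 60 KiB and bytes = 136 KiB gives head = 60 KiB and
 * tail = (60 + 136) % 64 = 4 KiB, so the loop below first issues a 4 KiB
 * request up to the 64 KiB boundary, then the aligned 128 KiB middle, and
 * finally the 4 KiB tail, each piece clamped to max_pdiscard. */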
2706 bdrv_inc_in_flight(bs);
2707 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
2709 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
2714 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
2716 assert(max_pdiscard >= bs->bl.request_alignment);
2722 /* Make small requests to get to alignment boundaries. */
2723 num = MIN(bytes, align - head);
2724 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
2725 num %= bs->bl.request_alignment;
2727 head = (head + num) % align;
2728 assert(num < max_pdiscard);
2731 /* Shorten the request to the last aligned cluster. */
2733 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
2734 tail > bs->bl.request_alignment) {
2735 tail %= bs->bl.request_alignment;
2739 /* limit request size */
2740 if (num > max_pdiscard) {
2748 if (bs->drv->bdrv_co_pdiscard) {
2749 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
2752 CoroutineIOCompletion co = {
2753 .coroutine = qemu_coroutine_self(),
2756 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
2757 bdrv_co_io_em_complete, &co);
2762 qemu_coroutine_yield();
2766 if (ret && ret != -ENOTSUP) {
2775 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
2776 tracked_request_end(&req);
2777 bdrv_dec_in_flight(bs);
2781 int bdrv_pdiscard(BdrvChild *child, int64_t offset, int bytes)
2791 if (qemu_in_coroutine()) {
2792 /* Fast-path if already in coroutine context */
2793 bdrv_pdiscard_co_entry(&rwco);
2795 co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
2796 bdrv_coroutine_enter(child->bs, co);
2797 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
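/*
 * Illustration (not part of the original code): a hypothetical caller simply
 * passes a byte range; whether anything is actually discarded depends on
 * BDRV_O_UNMAP and on driver support, and an advisory -ENOTSUP from the
 * driver is not reported as an error:
 *
 *     int ret = bdrv_pdiscard(child, 0, 1024 * 1024);
 *     if (ret < 0) {
 *         // genuine error; "discard not supported" does not end up here
 *     }
 */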
2803 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
2805 BlockDriver *drv = bs->drv;
2806 CoroutineIOCompletion co = {
2807 .coroutine = qemu_coroutine_self(),
2811 bdrv_inc_in_flight(bs);
2812 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
2817 if (drv->bdrv_co_ioctl) {
2818 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
2820 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2825 qemu_coroutine_yield();
2828 bdrv_dec_in_flight(bs);
2832 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2834 return qemu_memalign(bdrv_opt_mem_align(bs), size);
2837 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2839 return memset(qemu_blockalign(bs, size), 0, size);
2842 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2844 size_t align = bdrv_opt_mem_align(bs);
2846 /* Ensure that NULL is never returned on success */
2852 return qemu_try_memalign(align, size);
2855 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2857 void *mem = qemu_try_blockalign(bs, size);
2860 memset(mem, 0, size);
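/*
 * Illustration (added commentary): a typical bounce-buffer allocation pairs
 * qemu_blockalign()/qemu_try_blockalign() with qemu_vfree(), e.g.:
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);
 *
 * The buffer is aligned to bdrv_opt_mem_align(bs), which is normally
 * sufficient for O_DIRECT-style drivers.
 */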
2867 * Check if all memory in this vector is sector aligned.
2869 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2872 size_t alignment = bdrv_min_mem_align(bs);
2874 for (i = 0; i < qiov->niov; i++) {
2875 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2878 if (qiov->iov[i].iov_len % alignment) {
2886 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2887 NotifierWithReturn *notifier)
2889 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2892 void bdrv_io_plug(BlockDriverState *bs)
2896 QLIST_FOREACH(child, &bs->children, next) {
2897 bdrv_io_plug(child->bs);
2900 if (atomic_fetch_inc(&bs->io_plugged) == 0) {
2901 BlockDriver *drv = bs->drv;
2902 if (drv && drv->bdrv_io_plug) {
2903 drv->bdrv_io_plug(bs);
2908 void bdrv_io_unplug(BlockDriverState *bs)
2912 assert(bs->io_plugged);
2913 if (atomic_fetch_dec(&bs->io_plugged) == 1) {
2914 BlockDriver *drv = bs->drv;
2915 if (drv && drv->bdrv_io_unplug) {
2916 drv->bdrv_io_unplug(bs);
2920 QLIST_FOREACH(child, &bs->children, next) {
2921 bdrv_io_unplug(child->bs);
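/*
 * Illustration (added commentary, not in the original): plug/unplug calls
 * nest via the io_plugged counter, so only the outermost pair reaches the
 * driver.  A hypothetical batching caller:
 *
 *     bdrv_io_plug(bs);
 *     // queue several requests; the driver may hold them back
 *     bdrv_io_unplug(bs);   // driver submits the batch here
 */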
2925 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
2929 if (bs->drv && bs->drv->bdrv_register_buf) {
2930 bs->drv->bdrv_register_buf(bs, host, size);
2932 QLIST_FOREACH(child, &bs->children, next) {
2933 bdrv_register_buf(child->bs, host, size);
2937 void bdrv_unregister_buf(BlockDriverState *bs, void *host)
2941 if (bs->drv && bs->drv->bdrv_unregister_buf) {
2942 bs->drv->bdrv_unregister_buf(bs, host);
2944 QLIST_FOREACH(child, &bs->children, next) {
2945 bdrv_unregister_buf(child->bs, host);
2949 static int coroutine_fn bdrv_co_copy_range_internal(
2950 BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
2951 uint64_t dst_offset, uint64_t bytes,
2952 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
2955 BdrvTrackedRequest req;
2958 /* TODO We can support BDRV_REQ_NO_FALLBACK here */
2959 assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
2960 assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
2962 if (!dst || !dst->bs) {
2965 ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
2969 if (write_flags & BDRV_REQ_ZERO_WRITE) {
2970 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
2973 if (!src || !src->bs) {
2976 ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
2981 if (!src->bs->drv->bdrv_co_copy_range_from
2982 || !dst->bs->drv->bdrv_co_copy_range_to
2983 || src->bs->encrypted || dst->bs->encrypted) {
2988 bdrv_inc_in_flight(src->bs);
2989 tracked_request_begin(&req, src->bs, src_offset, bytes,
2992 /* BDRV_REQ_SERIALISING is only for write operation */
2993 assert(!(read_flags & BDRV_REQ_SERIALISING));
2994 if (!(read_flags & BDRV_REQ_NO_SERIALISING)) {
2995 wait_serialising_requests(&req);
2998 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3002 read_flags, write_flags);
3004 tracked_request_end(&req);
3005 bdrv_dec_in_flight(src->bs);
3007 bdrv_inc_in_flight(dst->bs);
3008 tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3009 BDRV_TRACKED_WRITE);
3010 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3013 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3017 read_flags, write_flags);
3019 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3020 tracked_request_end(&req);
3021 bdrv_dec_in_flight(dst->bs);
3027 /* Copy range from @src to @dst.
3029 * See the comment of bdrv_co_copy_range for the parameter and return value semantics. */
3031 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
3032 BdrvChild *dst, uint64_t dst_offset,
3034 BdrvRequestFlags read_flags,
3035 BdrvRequestFlags write_flags)
3037 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3038 read_flags, write_flags);
3039 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3040 bytes, read_flags, write_flags, true);
3043 /* Copy range from @src to @dst.
3045 * See the comment of bdrv_co_copy_range for the parameter and return value semantics. */
3047 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
3048 BdrvChild *dst, uint64_t dst_offset,
3050 BdrvRequestFlags read_flags,
3051 BdrvRequestFlags write_flags)
3053 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3054 read_flags, write_flags);
3055 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3056 bytes, read_flags, write_flags, false);
3059 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
3060 BdrvChild *dst, uint64_t dst_offset,
3061 uint64_t bytes, BdrvRequestFlags read_flags,
3062 BdrvRequestFlags write_flags)
3064 return bdrv_co_copy_range_from(src, src_offset,
3066 bytes, read_flags, write_flags);
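/*
 * Illustration (not part of the original code): a hypothetical coroutine
 * caller offloads a copy between two children and falls back to a manual
 * read/write loop if the drivers cannot do it:
 *
 *     ret = bdrv_co_copy_range(src_child, src_off, dst_child, dst_off,
 *                              len, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         // bounce through a buffer instead
 *     }
 *
 * The *_from/*_to split above is there so that drivers can forward the
 * request to their children on either side of the copy.
 */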
3069 static void bdrv_parent_cb_resize(BlockDriverState *bs)
3072 QLIST_FOREACH(c, &bs->parents, next_parent) {
3073 if (c->role->resize) {
3080 * Truncate file to 'offset' bytes (needed only for file protocols)
3082 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
3083 PreallocMode prealloc, Error **errp)
3085 BlockDriverState *bs = child->bs;
3086 BlockDriver *drv = bs->drv;
3087 BdrvTrackedRequest req;
3088 int64_t old_size, new_bytes;
3092 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3094 error_setg(errp, "No medium inserted");
3098 error_setg(errp, "Image size cannot be negative");
3102 old_size = bdrv_getlength(bs);
3104 error_setg_errno(errp, -old_size, "Failed to get old image size");
3108 if (offset > old_size) {
3109 new_bytes = offset - old_size;
3114 bdrv_inc_in_flight(bs);
3115 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3116 BDRV_TRACKED_TRUNCATE);
3118 /* If we are growing the image and potentially using preallocation for the
3119 * new area, we need to make sure that no write requests are made to it
3120 * concurrently or they might be overwritten by preallocation. */
3122 mark_request_serialising(&req, 1);
3124 if (bs->read_only) {
3125 error_setg(errp, "Image is read-only");
3129 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3132 error_setg_errno(errp, -ret,
3133 "Failed to prepare request for truncation");
3137 if (!drv->bdrv_co_truncate) {
3138 if (bs->file && drv->is_filter) {
3139 ret = bdrv_co_truncate(bs->file, offset, prealloc, errp);
3142 error_setg(errp, "Image format driver does not support resize");
3147 ret = drv->bdrv_co_truncate(bs, offset, prealloc, errp);
3151 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3153 error_setg_errno(errp, -ret, "Could not refresh total sector count");
3155 offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3157 /* It's possible that truncation succeeded but refresh_total_sectors
3158 * failed; the latter doesn't affect how we should finish the request.
3159 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3160 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3163 tracked_request_end(&req);
3164 bdrv_dec_in_flight(bs);
3169 typedef struct TruncateCo {
3172 PreallocMode prealloc;
3177 static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
3179 TruncateCo *tco = opaque;
3180 tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc,
3185 int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
3192 .prealloc = prealloc,
3197 if (qemu_in_coroutine()) {
3198 /* Fast-path if already in coroutine context */
3199 bdrv_truncate_co_entry(&tco);
3201 co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
3202 bdrv_coroutine_enter(child->bs, co);
3203 BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
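/*
 * Illustration (not part of the original code): a hypothetical caller growing
 * an image without preallocation, reporting errors via Error:
 *
 *     Error *local_err = NULL;
 *     int ret = bdrv_truncate(child, new_size, PREALLOC_MODE_OFF,
 *                             &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */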