1 /*
2 * Block layer I/O functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
25 #include "qemu/osdep.h"
27 #include "sysemu/block-backend.h"
28 #include "block/aio-wait.h"
29 #include "block/blockjob.h"
30 #include "block/blockjob_int.h"
31 #include "block/block_int.h"
32 #include "qemu/cutils.h"
33 #include "qapi/error.h"
34 #include "qemu/error-report.h"
36 #define NOT_DONE 0x7fffffff /* used while an emulated sync operation is in progress */
38 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
39 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
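/* With BDRV_SECTOR_BITS == 9 this is 32768 * 512 bytes, i.e. the bounce
 * buffer is capped at 16 MiB. */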
41 static void bdrv_parent_cb_resize(BlockDriverState *bs);
42 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
43 int64_t offset, int bytes, BdrvRequestFlags flags);
45 void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
46 bool ignore_bds_parents)
50 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
51 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
54 bdrv_parent_drained_begin_single(c, false);
58 void bdrv_parent_drained_end_single(BdrvChild *c)
60 assert(c->parent_quiesce_counter > 0);
61 c->parent_quiesce_counter--;
62 if (c->role->drained_end) {
63 c->role->drained_end(c);
67 void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
68 bool ignore_bds_parents)
72 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
73 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
76 bdrv_parent_drained_end_single(c);
80 static bool bdrv_parent_drained_poll_single(BdrvChild *c)
82 if (c->role->drained_poll) {
83 return c->role->drained_poll(c);
88 static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
89 bool ignore_bds_parents)
94 QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
95 if (c == ignore || (ignore_bds_parents && c->role->parent_is_bds)) {
98 busy |= bdrv_parent_drained_poll_single(c);
104 void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
106 c->parent_quiesce_counter++;
107 if (c->role->drained_begin) {
108 c->role->drained_begin(c);
111 BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
115 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
117 dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
118 dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
119 dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
120 src->opt_mem_alignment);
121 dst->min_mem_alignment = MAX(dst->min_mem_alignment,
122 src->min_mem_alignment);
123 dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
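/*
 * Illustrative sketch (not part of the original source): MIN_NON_ZERO()
 * treats 0 as "unlimited", so a parent without a transfer limit inherits
 * the child's finite one, while alignments take the stricter (larger) of
 * the two sides:
 *
 *     BlockLimits dst = { .max_transfer = 0 };       // unlimited
 *     BlockLimits src = { .max_transfer = 65536 };
 *     bdrv_merge_limits(&dst, &src);
 *     // dst.max_transfer == 65536
 */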
126 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
128 BlockDriver *drv = bs->drv;
129 Error *local_err = NULL;
131 memset(&bs->bl, 0, sizeof(bs->bl));
137 /* Default alignment based on whether the driver has a byte interface */
138 bs->bl.request_alignment = (drv->bdrv_co_preadv ||
139 drv->bdrv_aio_preadv) ? 1 : 512;
141 /* Take some limits from the children as a default */
143 bdrv_refresh_limits(bs->file->bs, &local_err);
145 error_propagate(errp, local_err);
148 bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
150 bs->bl.min_mem_alignment = 512;
151 bs->bl.opt_mem_alignment = getpagesize();
153 /* Safe default since most protocols use readv()/writev()/etc */
154 bs->bl.max_iov = IOV_MAX;
158 bdrv_refresh_limits(bs->backing->bs, &local_err);
160 error_propagate(errp, local_err);
163 bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
166 /* Then let the driver override it */
167 if (drv->bdrv_refresh_limits) {
168 drv->bdrv_refresh_limits(bs, errp);
173 * The copy-on-read flag is actually a reference count, so multiple users may
174 * use the feature without worrying about clobbering its previous state.
175 * Copy-on-read stays enabled until all users have disabled it.
177 void bdrv_enable_copy_on_read(BlockDriverState *bs)
179 atomic_inc(&bs->copy_on_read);
182 void bdrv_disable_copy_on_read(BlockDriverState *bs)
184 int old = atomic_fetch_dec(&bs->copy_on_read);
assert(old >= 1);
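/*
 * Usage sketch (illustrative only): a user that needs copy-on-read for the
 * duration of an operation brackets it with the enable/disable pair; the
 * reference count makes nested or concurrent users safe:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... reads that should populate the top layer ...
 *     bdrv_disable_copy_on_read(bs);
 */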
190 BlockDriverState *bs;
196 bool ignore_bds_parents;
197 int *drained_end_counter;
200 static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
202 BdrvCoDrainData *data = opaque;
203 BlockDriverState *bs = data->bs;
206 bs->drv->bdrv_co_drain_begin(bs);
208 bs->drv->bdrv_co_drain_end(bs);
211 /* Set data->done before reading bs->wakeup. */
212 atomic_mb_set(&data->done, true);
213 bdrv_dec_in_flight(bs);
215 if (data->drained_end_counter) {
216 atomic_dec(data->drained_end_counter);
219 if (data->begin || data->drained_end_counter) {
224 /* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
225 static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
226 int *drained_end_counter)
228 BdrvCoDrainData *data;
230 if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
231 (!begin && !bs->drv->bdrv_co_drain_end)) {
235 data = g_new(BdrvCoDrainData, 1);
236 *data = (BdrvCoDrainData) {
240 .drained_end_counter = drained_end_counter,
243 if (!begin && drained_end_counter) {
244 atomic_inc(drained_end_counter);
247 /* Make sure the driver callback completes during the polling phase for
 * drain_begin */
249 bdrv_inc_in_flight(bs);
250 data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
251 aio_co_schedule(bdrv_get_aio_context(bs), data->co);
254 * TODO: Drop this and make callers pass @drained_end_counter and poll
 * themselves
257 if (!begin && !drained_end_counter) {
258 BDRV_POLL_WHILE(bs, !data->done);
263 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
264 bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
265 BdrvChild *ignore_parent, bool ignore_bds_parents)
267 BdrvChild *child, *next;
269 if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
273 if (atomic_read(&bs->in_flight)) {
278 assert(!ignore_bds_parents);
279 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
280 if (bdrv_drain_poll(child->bs, recursive, child, false)) {
289 static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
290 BdrvChild *ignore_parent)
292 return bdrv_drain_poll(bs, recursive, ignore_parent, false);
295 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
296 BdrvChild *parent, bool ignore_bds_parents,
298 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
299 BdrvChild *parent, bool ignore_bds_parents,
300 int *drained_end_counter);
302 static void bdrv_co_drain_bh_cb(void *opaque)
304 BdrvCoDrainData *data = opaque;
305 Coroutine *co = data->co;
306 BlockDriverState *bs = data->bs;
309 AioContext *ctx = bdrv_get_aio_context(bs);
310 AioContext *co_ctx = qemu_coroutine_get_aio_context(co);
313 * When the coroutine yielded, the lock for its home context was
314 * released, so we need to re-acquire it here. If it explicitly
315 * acquired a different context, the lock is still held and we don't
316 * want to lock it a second time (or AIO_WAIT_WHILE() would hang).
319 aio_context_acquire(ctx);
321 bdrv_dec_in_flight(bs);
323 bdrv_do_drained_begin(bs, data->recursive, data->parent,
324 data->ignore_bds_parents, data->poll);
326 bdrv_do_drained_end(bs, data->recursive, data->parent,
327 data->ignore_bds_parents,
328 data->drained_end_counter);
331 aio_context_release(ctx);
335 bdrv_drain_all_begin();
342 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
343 bool begin, bool recursive,
345 bool ignore_bds_parents,
347 int *drained_end_counter)
349 BdrvCoDrainData data;
351 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
352 * other coroutines run if they were queued by aio_co_enter(). */
354 assert(qemu_in_coroutine());
355 data = (BdrvCoDrainData) {
356 .co = qemu_coroutine_self(),
360 .recursive = recursive,
362 .ignore_bds_parents = ignore_bds_parents,
364 .drained_end_counter = drained_end_counter,
368 bdrv_inc_in_flight(bs);
370 aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
371 bdrv_co_drain_bh_cb, &data);
373 qemu_coroutine_yield();
374 /* If we are resumed from some other event (such as an aio completion or a
375 * timer callback), it is a bug in the caller that should be fixed. */
379 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
380 BdrvChild *parent, bool ignore_bds_parents)
382 assert(!qemu_in_coroutine());
384 /* Stop things in parent-to-child order */
385 if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
386 aio_disable_external(bdrv_get_aio_context(bs));
389 bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
390 bdrv_drain_invoke(bs, true, NULL);
393 static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
394 BdrvChild *parent, bool ignore_bds_parents,
397 BdrvChild *child, *next;
399 if (qemu_in_coroutine()) {
400 bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
405 bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);
408 assert(!ignore_bds_parents);
409 bs->recursive_quiesce_counter++;
410 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
411 bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
417 * Wait for drained requests to finish.
419 * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
420 * call is needed so things in this AioContext can make progress even
421 * though we don't return to the main AioContext loop - this automatically
422 * includes other nodes in the same AioContext and therefore all child
 * nodes.
426 assert(!ignore_bds_parents);
427 BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
431 void bdrv_drained_begin(BlockDriverState *bs)
433 bdrv_do_drained_begin(bs, false, NULL, false, true);
436 void bdrv_subtree_drained_begin(BlockDriverState *bs)
438 bdrv_do_drained_begin(bs, true, NULL, false, true);
441 static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
442 BdrvChild *parent, bool ignore_bds_parents,
443 int *drained_end_counter)
445 BdrvChild *child, *next;
446 int old_quiesce_counter;
448 if (qemu_in_coroutine()) {
449 bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
450 false, drained_end_counter);
453 assert(bs->quiesce_counter > 0);
455 /* Re-enable things in child-to-parent order */
456 bdrv_drain_invoke(bs, false, drained_end_counter);
457 bdrv_parent_drained_end(bs, parent, ignore_bds_parents);
459 old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
460 if (old_quiesce_counter == 1) {
461 aio_enable_external(bdrv_get_aio_context(bs));
465 assert(!ignore_bds_parents);
466 bs->recursive_quiesce_counter--;
467 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
468 bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
469 drained_end_counter);
474 void bdrv_drained_end(BlockDriverState *bs)
476 bdrv_do_drained_end(bs, false, NULL, false, NULL);
479 void bdrv_subtree_drained_end(BlockDriverState *bs)
481 bdrv_do_drained_end(bs, true, NULL, false, NULL);
484 void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
488 for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
489 bdrv_do_drained_begin(child->bs, true, child, false, true);
493 void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
497 for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
498 bdrv_do_drained_end(child->bs, true, child, false, NULL);
503 * Wait for pending requests to complete on a single BlockDriverState subtree,
504 * and suspend the block driver's internal I/O until the next request arrives.
506 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
509 void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
511 assert(qemu_in_coroutine());
512 bdrv_drained_begin(bs);
513 bdrv_drained_end(bs);
516 void bdrv_drain(BlockDriverState *bs)
518 bdrv_drained_begin(bs);
519 bdrv_drained_end(bs);
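/*
 * Illustrative usage (not part of the original file): code that must
 * manipulate the graph or device state while no requests are in flight
 * wraps the critical section in a drained section:
 *
 *     bdrv_drained_begin(bs);
 *     ... no new requests are submitted, in-flight ones have completed ...
 *     bdrv_drained_end(bs);
 *
 * bdrv_drain() above is exactly this pattern with an empty critical
 * section.
 */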
522 static void bdrv_drain_assert_idle(BlockDriverState *bs)
524 BdrvChild *child, *next;
526 assert(atomic_read(&bs->in_flight) == 0);
527 QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
528 bdrv_drain_assert_idle(child->bs);
532 unsigned int bdrv_drain_all_count = 0;
534 static bool bdrv_drain_all_poll(void)
536 BlockDriverState *bs = NULL;
539 /* bdrv_drain_poll() can't make changes to the graph and we are holding the
540 * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
541 while ((bs = bdrv_next_all_states(bs))) {
542 AioContext *aio_context = bdrv_get_aio_context(bs);
543 aio_context_acquire(aio_context);
544 result |= bdrv_drain_poll(bs, false, NULL, true);
545 aio_context_release(aio_context);
552 * Wait for pending requests to complete across all BlockDriverStates
554 * This function does not flush data to disk, use bdrv_flush_all() for that
555 * after calling this function.
557 * This pauses all block jobs and disables external clients. It must
558 * be paired with bdrv_drain_all_end().
560 * NOTE: no new block jobs or BlockDriverStates can be created between
561 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
563 void bdrv_drain_all_begin(void)
565 BlockDriverState *bs = NULL;
567 if (qemu_in_coroutine()) {
568 bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
572 /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
573 * loop AioContext, so make sure we're in the main context. */
574 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
575 assert(bdrv_drain_all_count < INT_MAX);
576 bdrv_drain_all_count++;
578 /* Quiesce all nodes, without polling in-flight requests yet. The graph
579 * cannot change during this loop. */
580 while ((bs = bdrv_next_all_states(bs))) {
581 AioContext *aio_context = bdrv_get_aio_context(bs);
583 aio_context_acquire(aio_context);
584 bdrv_do_drained_begin(bs, false, NULL, true, false);
585 aio_context_release(aio_context);
588 /* Now poll the in-flight requests */
589 AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());
591 while ((bs = bdrv_next_all_states(bs))) {
592 bdrv_drain_assert_idle(bs);
596 void bdrv_drain_all_end(void)
598 BlockDriverState *bs = NULL;
600 while ((bs = bdrv_next_all_states(bs))) {
601 AioContext *aio_context = bdrv_get_aio_context(bs);
603 aio_context_acquire(aio_context);
604 bdrv_do_drained_end(bs, false, NULL, true, NULL);
605 aio_context_release(aio_context);
608 assert(bdrv_drain_all_count > 0);
609 bdrv_drain_all_count--;
612 void bdrv_drain_all(void)
614 bdrv_drain_all_begin();
615 bdrv_drain_all_end();
619 * Remove an active request from the tracked requests list
621 * This function should be called when a tracked request is completing.
623 static void tracked_request_end(BdrvTrackedRequest *req)
625 if (req->serialising) {
626 atomic_dec(&req->bs->serialising_in_flight);
629 qemu_co_mutex_lock(&req->bs->reqs_lock);
630 QLIST_REMOVE(req, list);
631 qemu_co_queue_restart_all(&req->wait_queue);
632 qemu_co_mutex_unlock(&req->bs->reqs_lock);
636 * Add an active request to the tracked requests list
638 static void tracked_request_begin(BdrvTrackedRequest *req,
639 BlockDriverState *bs,
642 enum BdrvTrackedRequestType type)
644 assert(bytes <= INT64_MAX && offset <= INT64_MAX - bytes);
646 *req = (BdrvTrackedRequest){
651 .co = qemu_coroutine_self(),
652 .serialising = false,
653 .overlap_offset = offset,
654 .overlap_bytes = bytes,
657 qemu_co_queue_init(&req->wait_queue);
659 qemu_co_mutex_lock(&bs->reqs_lock);
660 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
661 qemu_co_mutex_unlock(&bs->reqs_lock);
664 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
666 int64_t overlap_offset = req->offset & ~(align - 1);
667 uint64_t overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
670 if (!req->serialising) {
671 atomic_inc(&req->bs->serialising_in_flight);
672 req->serialising = true;
675 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
676 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
679 static bool is_request_serialising_and_aligned(BdrvTrackedRequest *req)
682 * If the request is serialising, overlap_offset and overlap_bytes are set,
683 * so we can check if the request is aligned. Otherwise, don't care and
 * return false.
687 return req->serialising && (req->offset == req->overlap_offset) &&
688 (req->bytes == req->overlap_bytes);
692 * Round a region to cluster boundaries
694 void bdrv_round_to_clusters(BlockDriverState *bs,
695 int64_t offset, int64_t bytes,
696 int64_t *cluster_offset,
697 int64_t *cluster_bytes)
701 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
702 *cluster_offset = offset;
703 *cluster_bytes = bytes;
705 int64_t c = bdi.cluster_size;
706 *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
707 *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
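/*
 * Worked example (assuming a 64 KiB cluster size): offset = 70000 and
 * bytes = 1000 yield cluster_offset = QEMU_ALIGN_DOWN(70000, 65536) = 65536
 * and cluster_bytes = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536,
 * i.e. the region is widened to the single cluster it touches.
 */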
711 static int bdrv_get_cluster_size(BlockDriverState *bs)
716 ret = bdrv_get_info(bs, &bdi);
717 if (ret < 0 || bdi.cluster_size == 0) {
718 return bs->bl.request_alignment;
720 return bdi.cluster_size;
724 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
725 int64_t offset, uint64_t bytes)
728 if (offset >= req->overlap_offset + req->overlap_bytes) {
732 if (req->overlap_offset >= offset + bytes) {
738 void bdrv_inc_in_flight(BlockDriverState *bs)
740 atomic_inc(&bs->in_flight);
743 void bdrv_wakeup(BlockDriverState *bs)
748 void bdrv_dec_in_flight(BlockDriverState *bs)
750 atomic_dec(&bs->in_flight);
754 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
756 BlockDriverState *bs = self->bs;
757 BdrvTrackedRequest *req;
761 if (!atomic_read(&bs->serialising_in_flight)) {
767 qemu_co_mutex_lock(&bs->reqs_lock);
768 QLIST_FOREACH(req, &bs->tracked_requests, list) {
769 if (req == self || (!req->serialising && !self->serialising)) {
772 if (tracked_request_overlaps(req, self->overlap_offset,
773 self->overlap_bytes))
775 /* Hitting this means there was a reentrant request, for
776 * example, a block driver issuing nested requests. This must
777 * never happen since it means deadlock.
779 assert(qemu_coroutine_self() != req->co);
781 /* If the request is already (indirectly) waiting for us, or
782 * will wait for us as soon as it wakes up, then just go on
783 * (instead of producing a deadlock in the former case). */
784 if (!req->waiting_for) {
785 self->waiting_for = req;
786 qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
787 self->waiting_for = NULL;
794 qemu_co_mutex_unlock(&bs->reqs_lock);
800 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
803 if (size > BDRV_REQUEST_MAX_BYTES) {
807 if (!bdrv_is_inserted(bs)) {
818 typedef struct RwCo {
824 BdrvRequestFlags flags;
827 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
831 if (!rwco->is_write) {
832 rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
833 rwco->qiov->size, rwco->qiov,
836 rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
837 rwco->qiov->size, rwco->qiov,
844 * Process a vectored synchronous request using coroutines
846 static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
847 QEMUIOVector *qiov, bool is_write,
848 BdrvRequestFlags flags)
855 .is_write = is_write,
860 if (qemu_in_coroutine()) {
861 /* Fast-path if already in coroutine context */
862 bdrv_rw_co_entry(&rwco);
864 co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
865 bdrv_coroutine_enter(child->bs, co);
866 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
871 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
872 int bytes, BdrvRequestFlags flags)
874 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);
876 return bdrv_prwv_co(child, offset, &qiov, true,
877 BDRV_REQ_ZERO_WRITE | flags);
881 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
882 * The operation is sped up by checking the block status and only writing
883 * zeroes to the device if they currently do not return zeroes. Optional
884 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
887 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
889 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
892 int64_t target_size, bytes, offset = 0;
893 BlockDriverState *bs = child->bs;
895 target_size = bdrv_getlength(bs);
896 if (target_size < 0) {
901 bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
905 ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
909 if (ret & BDRV_BLOCK_ZERO) {
913 ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
921 int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
925 ret = bdrv_prwv_co(child, offset, qiov, false, 0);
933 /* See bdrv_pwrite() for the return codes */
934 int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
936 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
942 return bdrv_preadv(child, offset, &qiov);
945 int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
949 ret = bdrv_prwv_co(child, offset, qiov, true, 0);
957 /* Return no. of bytes on success or < 0 on error. Important errors are:
958 -EIO generic I/O error (may happen for all errors)
959 -ENOMEDIUM No media inserted.
960 -EINVAL Invalid offset or number of bytes
961 -EACCES Trying to write a read-only device
963 int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
965 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
971 return bdrv_pwritev(child, offset, &qiov);
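/*
 * Usage sketch (illustrative; the buffer and offsets are made up): read a
 * header, update it, and write it back through the same BdrvChild:
 *
 *     uint8_t header[512];
 *     int ret = bdrv_pread(child, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;          // e.g. -ENOMEDIUM, -EIO, -EINVAL
 *     }
 *     ... modify header ...
 *     ret = bdrv_pwrite(child, 0, header, sizeof(header));
 */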
975 * Writes to the file and ensures that no writes are reordered across this
976 * request (acts as a barrier)
978 * Returns 0 on success, -errno in error cases.
980 int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
981 const void *buf, int count)
985 ret = bdrv_pwrite(child, offset, buf, count);
990 ret = bdrv_flush(child->bs);
998 typedef struct CoroutineIOCompletion {
999 Coroutine *coroutine;
1001 } CoroutineIOCompletion;
1003 static void bdrv_co_io_em_complete(void *opaque, int ret)
1005 CoroutineIOCompletion *co = opaque;
1008 aio_co_wake(co->coroutine);
1011 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
1012 uint64_t offset, uint64_t bytes,
1013 QEMUIOVector *qiov, int flags)
1015 BlockDriver *drv = bs->drv;
1017 unsigned int nb_sectors;
1019 assert(!(flags & ~BDRV_REQ_MASK));
1020 assert(!(flags & BDRV_REQ_NO_FALLBACK));
1026 if (drv->bdrv_co_preadv) {
1027 return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
1030 if (drv->bdrv_aio_preadv) {
1032 CoroutineIOCompletion co = {
1033 .coroutine = qemu_coroutine_self(),
1036 acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
1037 bdrv_co_io_em_complete, &co);
1041 qemu_coroutine_yield();
1046 sector_num = offset >> BDRV_SECTOR_BITS;
1047 nb_sectors = bytes >> BDRV_SECTOR_BITS;
1049 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1050 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1051 assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1052 assert(drv->bdrv_co_readv);
1054 return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1057 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
1058 uint64_t offset, uint64_t bytes,
1059 QEMUIOVector *qiov, int flags)
1061 BlockDriver *drv = bs->drv;
1063 unsigned int nb_sectors;
1066 assert(!(flags & ~BDRV_REQ_MASK));
1067 assert(!(flags & BDRV_REQ_NO_FALLBACK));
1073 if (drv->bdrv_co_pwritev) {
1074 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
1075 flags & bs->supported_write_flags);
1076 flags &= ~bs->supported_write_flags;
1080 if (drv->bdrv_aio_pwritev) {
1082 CoroutineIOCompletion co = {
1083 .coroutine = qemu_coroutine_self(),
1086 acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
1087 flags & bs->supported_write_flags,
1088 bdrv_co_io_em_complete, &co);
1089 flags &= ~bs->supported_write_flags;
1093 qemu_coroutine_yield();
1099 sector_num = offset >> BDRV_SECTOR_BITS;
1100 nb_sectors = bytes >> BDRV_SECTOR_BITS;
1102 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1103 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1104 assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1106 assert(drv->bdrv_co_writev);
1107 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
1108 flags & bs->supported_write_flags);
1109 flags &= ~bs->supported_write_flags;
1112 if (ret == 0 && (flags & BDRV_REQ_FUA)) {
1113 ret = bdrv_co_flush(bs);
1119 static int coroutine_fn
1120 bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
1121 uint64_t bytes, QEMUIOVector *qiov)
1123 BlockDriver *drv = bs->drv;
1129 if (!drv->bdrv_co_pwritev_compressed) {
1133 return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
1136 static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
1137 int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
1139 BlockDriverState *bs = child->bs;
1141 /* Perform I/O through a temporary buffer so that users who scribble over
1142 * their read buffer while the operation is in progress do not end up
1143 * modifying the image file. This is critical for zero-copy guest I/O
1144 * where anything might happen inside guest memory.
1146 void *bounce_buffer;
1148 BlockDriver *drv = bs->drv;
1149 QEMUIOVector local_qiov;
1150 int64_t cluster_offset;
1151 int64_t cluster_bytes;
1154 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1155 BDRV_REQUEST_MAX_BYTES);
1156 unsigned int progress = 0;
1162 /* FIXME We cannot require callers to have write permissions when all they
1163 * are doing is a read request. If we did things right, write permissions
1164 * would be obtained anyway, but internally by the copy-on-read code. As
1165 * long as it is implemented here rather than in a separate filter driver,
1166 * the copy-on-read code doesn't have its own BdrvChild, however, for which
1167 * it could request permissions. Therefore we have to bypass the permission
1168 * system for the moment. */
1169 // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1171 /* Cover the entire cluster so no additional backing file I/O is required when
1172 * allocating a cluster in the image file. Note that this value may exceed
1173 * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1174 * is one reason we loop rather than doing it all at once.
1176 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
1177 skip_bytes = offset - cluster_offset;
1179 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1180 cluster_offset, cluster_bytes);
1182 bounce_buffer = qemu_try_blockalign(bs,
1183 MIN(MIN(max_transfer, cluster_bytes),
1184 MAX_BOUNCE_BUFFER));
1185 if (bounce_buffer == NULL) {
1190 while (cluster_bytes) {
1193 ret = bdrv_is_allocated(bs, cluster_offset,
1194 MIN(cluster_bytes, max_transfer), &pnum);
1196 /* Safe to treat errors in querying allocation as if
1197 * unallocated; we'll probably fail again soon on the
1198 * read, but at least that will set a decent errno.
1200 pnum = MIN(cluster_bytes, max_transfer);
1203 /* Stop at EOF if the image ends in the middle of the cluster */
1204 if (ret == 0 && pnum == 0) {
1205 assert(progress >= bytes);
1209 assert(skip_bytes < pnum);
1212 /* Must copy-on-read; use the bounce buffer */
1213 pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
1214 qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1216 ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1222 bdrv_debug_event(bs, BLKDBG_COR_WRITE);
1223 if (drv->bdrv_co_pwrite_zeroes &&
1224 buffer_is_zero(bounce_buffer, pnum)) {
1225 /* FIXME: Should we (perhaps conditionally) be setting
1226 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1227 * that still correctly reads as zero? */
1228 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
1229 BDRV_REQ_WRITE_UNCHANGED);
1231 /* This does not change the data on the disk, so it is not
1232 * necessary to flush even in cache=writethrough mode.
1234 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1236 BDRV_REQ_WRITE_UNCHANGED);
1240 /* It might be okay to ignore write errors for guest
1241 * requests. If this is a deliberate copy-on-read
1242 * then we don't want to ignore the error. Simply
1243 * report it in all cases.
1248 qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes,
1251 /* Read directly into the destination */
1252 qemu_iovec_init(&local_qiov, qiov->niov);
1253 qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes);
1254 ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size,
1256 qemu_iovec_destroy(&local_qiov);
1262 cluster_offset += pnum;
1263 cluster_bytes -= pnum;
1264 progress += pnum - skip_bytes;
1270 qemu_vfree(bounce_buffer);
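/*
 * Worked example of the rounding above (64 KiB clusters): a guest read at
 * offset = 70000 for bytes = 1000 becomes cluster_offset = 65536,
 * cluster_bytes = 65536 and skip_bytes = 4464. The loop reads whole
 * clusters into the bounce buffer, writes them back to the top layer, and
 * copies only the requested bytes out of bounce_buffer + skip_bytes.
 */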
1275 * Forwards an already correctly aligned request to the BlockDriver. This
1276 * handles copy on read, zeroing after EOF, and fragmentation of large
1277 * reads; any other features must be implemented by the caller.
1279 static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
1280 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1281 int64_t align, QEMUIOVector *qiov, int flags)
1283 BlockDriverState *bs = child->bs;
1284 int64_t total_bytes, max_bytes;
1286 uint64_t bytes_remaining = bytes;
1289 assert(is_power_of_2(align));
1290 assert((offset & (align - 1)) == 0);
1291 assert((bytes & (align - 1)) == 0);
1292 assert(!qiov || bytes == qiov->size);
1293 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1294 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1297 /* TODO: We would need a per-BDS .supported_read_flags and
1298 * potential fallback support, if we ever implement any read flags
1299 * to pass through to drivers. For now, there aren't any
1300 * passthrough flags. */
1301 assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
1303 /* Handle Copy on Read and associated serialisation */
1304 if (flags & BDRV_REQ_COPY_ON_READ) {
1305 /* If we touch the same cluster it counts as an overlap. This
1306 * guarantees that allocating writes will be serialized and not race
1307 * with each other for the same cluster. For example, in copy-on-read
1308 * it ensures that the CoR read and write operations are atomic and
1309 * guest writes cannot interleave between them. */
1310 mark_request_serialising(req, bdrv_get_cluster_size(bs));
1313 /* BDRV_REQ_SERIALISING is only for write operations */
1314 assert(!(flags & BDRV_REQ_SERIALISING));
1316 if (!(flags & BDRV_REQ_NO_SERIALISING)) {
1317 wait_serialising_requests(req);
1320 if (flags & BDRV_REQ_COPY_ON_READ) {
1323 ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
1328 if (!ret || pnum != bytes) {
1329 ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
1334 /* Forward the request to the BlockDriver, possibly fragmenting it */
1335 total_bytes = bdrv_getlength(bs);
1336 if (total_bytes < 0) {
1341 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1342 if (bytes <= max_bytes && bytes <= max_transfer) {
1343 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
1347 while (bytes_remaining) {
1351 QEMUIOVector local_qiov;
1353 num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
1355 qemu_iovec_init(&local_qiov, qiov->niov);
1356 qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1358 ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1359 num, &local_qiov, 0);
1361 qemu_iovec_destroy(&local_qiov);
1363 num = bytes_remaining;
1364 ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
1370 bytes_remaining -= num;
1374 return ret < 0 ? ret : 0;
1378 * Handle a read request in coroutine context
1380 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1381 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1382 BdrvRequestFlags flags)
1384 BlockDriverState *bs = child->bs;
1385 BlockDriver *drv = bs->drv;
1386 BdrvTrackedRequest req;
1388 uint64_t align = bs->bl.request_alignment;
1389 uint8_t *head_buf = NULL;
1390 uint8_t *tail_buf = NULL;
1391 QEMUIOVector local_qiov;
1392 bool use_local_qiov = false;
1395 trace_bdrv_co_preadv(child->bs, offset, bytes, flags);
1401 ret = bdrv_check_byte_request(bs, offset, bytes);
1406 bdrv_inc_in_flight(bs);
1408 /* Don't do copy-on-read if we read data before a write operation */
1409 if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
1410 flags |= BDRV_REQ_COPY_ON_READ;
1413 /* Align read if necessary by padding qiov */
1414 if (offset & (align - 1)) {
1415 head_buf = qemu_blockalign(bs, align);
1416 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1417 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1418 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1419 use_local_qiov = true;
1421 bytes += offset & (align - 1);
1422 offset = offset & ~(align - 1);
1425 if ((offset + bytes) & (align - 1)) {
1426 if (!use_local_qiov) {
1427 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1428 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1429 use_local_qiov = true;
1431 tail_buf = qemu_blockalign(bs, align);
1432 qemu_iovec_add(&local_qiov, tail_buf,
1433 align - ((offset + bytes) & (align - 1)));
1435 bytes = ROUND_UP(bytes, align);
1438 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1439 ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
1440 use_local_qiov ? &local_qiov : qiov,
1442 tracked_request_end(&req);
1443 bdrv_dec_in_flight(bs);
1445 if (use_local_qiov) {
1446 qemu_iovec_destroy(&local_qiov);
1447 qemu_vfree(head_buf);
1448 qemu_vfree(tail_buf);
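/*
 * Worked example of the padding above (align = 512): a request with
 * offset = 1000, bytes = 2000 gets a 488-byte head buffer (1000 & 511);
 * offset drops to 512 and bytes grows to 2488. Since 512 + 2488 = 3000 is
 * unaligned, a 72-byte tail buffer (512 - (3000 & 511)) is appended and
 * bytes is rounded up to 2560, so the driver sees one aligned request
 * covering [512, 3072).
 */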
1454 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1455 int64_t offset, int bytes, BdrvRequestFlags flags)
1457 BlockDriver *drv = bs->drv;
1461 bool need_flush = false;
1465 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1466 int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1467 bs->bl.request_alignment);
1468 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1474 if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1478 assert(alignment % bs->bl.request_alignment == 0);
1479 head = offset % alignment;
1480 tail = (offset + bytes) % alignment;
1481 max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1482 assert(max_write_zeroes >= bs->bl.request_alignment);
1484 while (bytes > 0 && !ret) {
1487 /* Align request. Block drivers can expect the "bulk" of the request
1488 * to be aligned, and that unaligned requests do not cross cluster
 * boundaries.
1492 /* Make a small request up to the first aligned sector. For
1493 * convenience, limit this request to max_transfer even if
1494 * we don't need to fall back to writes. */
1495 num = MIN(MIN(bytes, max_transfer), alignment - head);
1496 head = (head + num) % alignment;
1497 assert(num < max_write_zeroes);
1498 } else if (tail && num > alignment) {
1499 /* Shorten the request to the last aligned sector. */
1503 /* limit request size */
1504 if (num > max_write_zeroes) {
1505 num = max_write_zeroes;
1509 /* First try the efficient write zeroes operation */
1510 if (drv->bdrv_co_pwrite_zeroes) {
1511 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1512 flags & bs->supported_zero_flags);
1513 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1514 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1518 assert(!bs->supported_zero_flags);
1521 if (ret < 0 && !(flags & BDRV_REQ_NO_FALLBACK)) {
1522 /* Fall back to bounce buffer if write zeroes is unsupported */
1523 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1525 if ((flags & BDRV_REQ_FUA) &&
1526 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1527 /* No need for bdrv_driver_pwritev() to do a fallback
1528 * flush on each chunk; use just one at the end */
1529 write_flags &= ~BDRV_REQ_FUA;
1532 num = MIN(num, max_transfer);
1534 buf = qemu_try_blockalign0(bs, num);
1540 qemu_iovec_init_buf(&qiov, buf, num);
1542 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
1544 /* Keep bounce buffer around if it is big enough for all
1545 * future requests.
1547 if (num < max_transfer) {
1558 if (ret == 0 && need_flush) {
1559 ret = bdrv_co_flush(bs);
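/*
 * Worked example of the head/tail split (assuming alignment = 4096 and the
 * request-shortening logic elided above): zeroing offset = 5000,
 * bytes = 10000 issues a 3192-byte request up to the aligned boundary at
 * 8192, a 4096-byte aligned request covering [8192, 12288), and a final
 * 2712-byte tail request ending at 15000.
 */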
1565 static inline int coroutine_fn
1566 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, uint64_t bytes,
1567 BdrvTrackedRequest *req, int flags)
1569 BlockDriverState *bs = child->bs;
1571 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1573 if (bs->read_only) {
1577 /* BDRV_REQ_NO_SERIALISING is only for read operations */
1578 assert(!(flags & BDRV_REQ_NO_SERIALISING));
1579 assert(!(bs->open_flags & BDRV_O_INACTIVE));
1580 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1581 assert(!(flags & ~BDRV_REQ_MASK));
1583 if (flags & BDRV_REQ_SERIALISING) {
1584 mark_request_serialising(req, bdrv_get_cluster_size(bs));
1587 waited = wait_serialising_requests(req);
1589 assert(!waited || !req->serialising ||
1590 is_request_serialising_and_aligned(req));
1591 assert(req->overlap_offset <= offset);
1592 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1593 assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
1595 switch (req->type) {
1596 case BDRV_TRACKED_WRITE:
1597 case BDRV_TRACKED_DISCARD:
1598 if (flags & BDRV_REQ_WRITE_UNCHANGED) {
1599 assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1601 assert(child->perm & BLK_PERM_WRITE);
1603 return notifier_with_return_list_notify(&bs->before_write_notifiers,
1605 case BDRV_TRACKED_TRUNCATE:
1606 assert(child->perm & BLK_PERM_RESIZE);
1613 static inline void coroutine_fn
1614 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes,
1615 BdrvTrackedRequest *req, int ret)
1617 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1618 BlockDriverState *bs = child->bs;
1620 atomic_inc(&bs->write_gen);
1623 * Discard cannot extend the image, but in error handling cases, such as
1624 * when reverting a qcow2 cluster allocation, the discarded range can extend
1625 * past the end of the image file, so we cannot assert about BDRV_TRACKED_DISCARD
1626 * here. Instead, just skip it, since semantically a discard request
1627 * beyond EOF cannot expand the image anyway.
1630 (req->type == BDRV_TRACKED_TRUNCATE ||
1631 end_sector > bs->total_sectors) &&
1632 req->type != BDRV_TRACKED_DISCARD) {
1633 bs->total_sectors = end_sector;
1634 bdrv_parent_cb_resize(bs);
1635 bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
1638 switch (req->type) {
1639 case BDRV_TRACKED_WRITE:
1640 stat64_max(&bs->wr_highest_offset, offset + bytes);
1641 /* fall through, to set dirty bits */
1642 case BDRV_TRACKED_DISCARD:
1643 bdrv_set_dirty(bs, offset, bytes);
1652 * Forwards an already correctly aligned write request to the BlockDriver,
1653 * after possibly fragmenting it.
1655 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
1656 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1657 int64_t align, QEMUIOVector *qiov, int flags)
1659 BlockDriverState *bs = child->bs;
1660 BlockDriver *drv = bs->drv;
1663 uint64_t bytes_remaining = bytes;
1670 if (bdrv_has_readonly_bitmaps(bs)) {
1674 assert(is_power_of_2(align));
1675 assert((offset & (align - 1)) == 0);
1676 assert((bytes & (align - 1)) == 0);
1677 assert(!qiov || bytes == qiov->size);
1678 max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1681 ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
1683 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1684 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
1685 qemu_iovec_is_zero(qiov)) {
1686 flags |= BDRV_REQ_ZERO_WRITE;
1687 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1688 flags |= BDRV_REQ_MAY_UNMAP;
1693 /* Do nothing, write notifier decided to fail this request */
1694 } else if (flags & BDRV_REQ_ZERO_WRITE) {
1695 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1696 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
1697 } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
1698 ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
1699 } else if (bytes <= max_transfer) {
1700 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1701 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
1703 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1704 while (bytes_remaining) {
1705 int num = MIN(bytes_remaining, max_transfer);
1706 QEMUIOVector local_qiov;
1707 int local_flags = flags;
1710 if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
1711 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1712 /* If FUA is going to be emulated by flush, we only
1713 * need to flush on the last iteration */
1714 local_flags &= ~BDRV_REQ_FUA;
1716 qemu_iovec_init(&local_qiov, qiov->niov);
1717 qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1719 ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1720 num, &local_qiov, local_flags);
1721 qemu_iovec_destroy(&local_qiov);
1725 bytes_remaining -= num;
1728 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1733 bdrv_co_write_req_finish(child, offset, bytes, req, ret);
1738 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
1741 BdrvRequestFlags flags,
1742 BdrvTrackedRequest *req)
1744 BlockDriverState *bs = child->bs;
1745 uint8_t *buf = NULL;
1746 QEMUIOVector local_qiov;
1747 uint64_t align = bs->bl.request_alignment;
1748 unsigned int head_padding_bytes, tail_padding_bytes;
1751 head_padding_bytes = offset & (align - 1);
1752 tail_padding_bytes = (align - (offset + bytes)) & (align - 1);
1755 assert(flags & BDRV_REQ_ZERO_WRITE);
1756 if (head_padding_bytes || tail_padding_bytes) {
1757 buf = qemu_blockalign(bs, align);
1758 qemu_iovec_init_buf(&local_qiov, buf, align);
1760 if (head_padding_bytes) {
1761 uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1763 /* RMW the unaligned part before head. */
1764 mark_request_serialising(req, align);
1765 wait_serialising_requests(req);
1766 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1767 ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
1768 align, &local_qiov, 0);
1772 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1774 memset(buf + head_padding_bytes, 0, zero_bytes);
1775 ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
1777 flags & ~BDRV_REQ_ZERO_WRITE);
1781 offset += zero_bytes;
1782 bytes -= zero_bytes;
1785 assert(!bytes || (offset & (align - 1)) == 0);
1786 if (bytes >= align) {
1787 /* Write the aligned part in the middle. */
1788 uint64_t aligned_bytes = bytes & ~(align - 1);
1789 ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
1794 bytes -= aligned_bytes;
1795 offset += aligned_bytes;
1798 assert(!bytes || (offset & (align - 1)) == 0);
1800 assert(align == tail_padding_bytes + bytes);
1801 /* RMW the unaligned part after tail. */
1802 mark_request_serialising(req, align);
1803 wait_serialising_requests(req);
1804 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1805 ret = bdrv_aligned_preadv(child, req, offset, align,
1806 align, &local_qiov, 0);
1810 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1812 memset(buf, 0, bytes);
1813 ret = bdrv_aligned_pwritev(child, req, offset, align, align,
1814 &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1823 * Handle a write request in coroutine context
1825 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
1826 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1827 BdrvRequestFlags flags)
1829 BlockDriverState *bs = child->bs;
1830 BdrvTrackedRequest req;
1831 uint64_t align = bs->bl.request_alignment;
1832 uint8_t *head_buf = NULL;
1833 uint8_t *tail_buf = NULL;
1834 QEMUIOVector local_qiov;
1835 bool use_local_qiov = false;
1838 trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
1844 ret = bdrv_check_byte_request(bs, offset, bytes);
1849 bdrv_inc_in_flight(bs);
1851 * Align write if necessary by performing a read-modify-write cycle.
1852 * Pad qiov with the read parts and be sure to have a tracked request not
1853 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1855 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1857 if (flags & BDRV_REQ_ZERO_WRITE) {
1858 ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
1862 if (offset & (align - 1)) {
1863 QEMUIOVector head_qiov;
1865 mark_request_serialising(&req, align);
1866 wait_serialising_requests(&req);
1868 head_buf = qemu_blockalign(bs, align);
1869 qemu_iovec_init_buf(&head_qiov, head_buf, align);
1871 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1872 ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
1873 align, &head_qiov, 0);
1877 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1879 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1880 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1881 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1882 use_local_qiov = true;
1884 bytes += offset & (align - 1);
1885 offset = offset & ~(align - 1);
1887 /* We have read the tail already if the request is smaller
1888 * than one aligned block.
1890 if (bytes < align) {
1891 qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
1896 if ((offset + bytes) & (align - 1)) {
1897 QEMUIOVector tail_qiov;
1901 mark_request_serialising(&req, align);
1902 waited = wait_serialising_requests(&req);
1903 assert(!waited || !use_local_qiov);
1905 tail_buf = qemu_blockalign(bs, align);
1906 qemu_iovec_init_buf(&tail_qiov, tail_buf, align);
1908 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1909 ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
1910 align, align, &tail_qiov, 0);
1914 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1916 if (!use_local_qiov) {
1917 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1918 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1919 use_local_qiov = true;
1922 tail_bytes = (offset + bytes) & (align - 1);
1923 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1925 bytes = ROUND_UP(bytes, align);
1928 ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
1929 use_local_qiov ? &local_qiov : qiov,
1934 if (use_local_qiov) {
1935 qemu_iovec_destroy(&local_qiov);
1937 qemu_vfree(head_buf);
1938 qemu_vfree(tail_buf);
1940 tracked_request_end(&req);
1941 bdrv_dec_in_flight(bs);
1945 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
1946 int bytes, BdrvRequestFlags flags)
1948 trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
1950 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
1951 flags &= ~BDRV_REQ_MAY_UNMAP;
1954 return bdrv_co_pwritev(child, offset, bytes, NULL,
1955 BDRV_REQ_ZERO_WRITE | flags);
1959 * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
1961 int bdrv_flush_all(void)
1963 BdrvNextIterator it;
1964 BlockDriverState *bs = NULL;
1967 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
1968 AioContext *aio_context = bdrv_get_aio_context(bs);
1971 aio_context_acquire(aio_context);
1972 ret = bdrv_flush(bs);
1973 if (ret < 0 && !result) {
1976 aio_context_release(aio_context);
1983 typedef struct BdrvCoBlockStatusData {
1984 BlockDriverState *bs;
1985 BlockDriverState *base;
1991 BlockDriverState **file;
1994 } BdrvCoBlockStatusData;
1996 int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
2002 BlockDriverState **file)
2004 assert(bs->file && bs->file->bs);
2007 *file = bs->file->bs;
2008 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2011 int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
2017 BlockDriverState **file)
2019 assert(bs->backing && bs->backing->bs);
2022 *file = bs->backing->bs;
2023 return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2027 * Returns the allocation status of the specified region.
2028 * Drivers not implementing the functionality are assumed to not support
2029 * backing files, hence all their sectors are reported as allocated.
2031 * If 'want_zero' is true, the caller is querying for mapping
2032 * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2033 * _ZERO where possible; otherwise, the result favors larger 'pnum',
2034 * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2036 * If 'offset' is beyond the end of the disk image the return value is
2037 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2039 * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2040 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2041 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2043 * 'pnum' is set to the number of bytes (including and immediately
2044 * following the specified offset) that are easily known to be in the
2045 * same allocated/unallocated state. Note that a second call starting
2046 * at the original offset plus returned pnum may have the same status.
2047 * The returned value is non-zero on success except at end-of-file.
2049 * Returns negative errno on failure. Otherwise, if the
2050 * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2051 * set to the host mapping and BDS corresponding to the guest offset.
2053 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2055 int64_t offset, int64_t bytes,
2056 int64_t *pnum, int64_t *map,
2057 BlockDriverState **file)
2060 int64_t n; /* bytes */
2062 int64_t local_map = 0;
2063 BlockDriverState *local_file = NULL;
2064 int64_t aligned_offset, aligned_bytes;
2069 total_size = bdrv_getlength(bs);
2070 if (total_size < 0) {
2075 if (offset >= total_size) {
2076 ret = BDRV_BLOCK_EOF;
2084 n = total_size - offset;
2089 /* Must be non-NULL or bdrv_getlength() would have failed */
2091 if (!bs->drv->bdrv_co_block_status) {
2093 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2094 if (offset + bytes == total_size) {
2095 ret |= BDRV_BLOCK_EOF;
2097 if (bs->drv->protocol_name) {
2098 ret |= BDRV_BLOCK_OFFSET_VALID;
2105 bdrv_inc_in_flight(bs);
2107 /* Round out to request_alignment boundaries */
2108 align = bs->bl.request_alignment;
2109 aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2110 aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2112 ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2113 aligned_bytes, pnum, &local_map,
2121 * The driver's result must be a non-zero multiple of request_alignment.
2122 * Clamp pnum and adjust map to original request.
2124 assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2125 align > offset - aligned_offset);
2126 if (ret & BDRV_BLOCK_RECURSE) {
2127 assert(ret & BDRV_BLOCK_DATA);
2128 assert(ret & BDRV_BLOCK_OFFSET_VALID);
2129 assert(!(ret & BDRV_BLOCK_ZERO));
2132 *pnum -= offset - aligned_offset;
2133 if (*pnum > bytes) {
2136 if (ret & BDRV_BLOCK_OFFSET_VALID) {
2137 local_map += offset - aligned_offset;
2140 if (ret & BDRV_BLOCK_RAW) {
2141 assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2142 ret = bdrv_co_block_status(local_file, want_zero, local_map,
2143 *pnum, pnum, &local_map, &local_file);
2147 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2148 ret |= BDRV_BLOCK_ALLOCATED;
2149 } else if (want_zero) {
2150 if (bdrv_unallocated_blocks_are_zero(bs)) {
2151 ret |= BDRV_BLOCK_ZERO;
2152 } else if (bs->backing) {
2153 BlockDriverState *bs2 = bs->backing->bs;
2154 int64_t size2 = bdrv_getlength(bs2);
2156 if (size2 >= 0 && offset >= size2) {
2157 ret |= BDRV_BLOCK_ZERO;
2162 if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2163 local_file && local_file != bs &&
2164 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2165 (ret & BDRV_BLOCK_OFFSET_VALID)) {
2169 ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2170 *pnum, &file_pnum, NULL, NULL);
2172 /* Ignore errors. This is just providing extra information; it
2173 * is useful but not necessary.
2175 if (ret2 & BDRV_BLOCK_EOF &&
2176 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2178 * It is valid for the format block driver to read
2179 * beyond the end of the underlying file's current
2180 * size; such areas read as zero.
2182 ret |= BDRV_BLOCK_ZERO;
2184 /* Limit request to the range reported by the protocol driver */
2186 ret |= (ret2 & BDRV_BLOCK_ZERO);
2192 bdrv_dec_in_flight(bs);
2193 if (ret >= 0 && offset + *pnum == total_size) {
2194 ret |= BDRV_BLOCK_EOF;
2206 static int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2207 BlockDriverState *base,
2213 BlockDriverState **file)
2215 BlockDriverState *p;
2220 for (p = bs; p != base; p = backing_bs(p)) {
2221 ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2226 if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
2228 * Reading beyond the end of the file continues to read
2229 * zeroes, but we can only widen the result to the
2230 * unallocated length we learned from an earlier
 * iteration.
2235 if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
2238 /* [offset, pnum] unallocated on this layer, which could be only
2239 * the first part of [offset, bytes]. */
2240 bytes = MIN(bytes, *pnum);
2246 /* Coroutine wrapper for bdrv_block_status_above() */
2247 static void coroutine_fn bdrv_block_status_above_co_entry(void *opaque)
2249 BdrvCoBlockStatusData *data = opaque;
2251 data->ret = bdrv_co_block_status_above(data->bs, data->base,
2253 data->offset, data->bytes,
2254 data->pnum, data->map, data->file);
2260 * Synchronous wrapper around bdrv_co_block_status_above().
2262 * See bdrv_co_block_status_above() for details.
2264 static int bdrv_common_block_status_above(BlockDriverState *bs,
2265 BlockDriverState *base,
2266 bool want_zero, int64_t offset,
2267 int64_t bytes, int64_t *pnum,
2269 BlockDriverState **file)
2272 BdrvCoBlockStatusData data = {
2275 .want_zero = want_zero,
2284 if (qemu_in_coroutine()) {
2285 /* Fast-path if already in coroutine context */
2286 bdrv_block_status_above_co_entry(&data);
2288 co = qemu_coroutine_create(bdrv_block_status_above_co_entry, &data);
2289 bdrv_coroutine_enter(bs, co);
2290 BDRV_POLL_WHILE(bs, !data.done);
2295 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2296 int64_t offset, int64_t bytes, int64_t *pnum,
2297 int64_t *map, BlockDriverState **file)
2299 return bdrv_common_block_status_above(bs, base, true, offset, bytes,
2303 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2304 int64_t *pnum, int64_t *map, BlockDriverState **file)
2306 return bdrv_block_status_above(bs, backing_bs(bs),
2307 offset, bytes, pnum, map, file);
2310 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2311 int64_t bytes, int64_t *pnum)
2316 ret = bdrv_common_block_status_above(bs, backing_bs(bs), false, offset,
2317 bytes, pnum ? pnum : &dummy, NULL,
2322 return !!(ret & BDRV_BLOCK_ALLOCATED);
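/*
 * Illustrative sketch (not part of the original source; total_size is a
 * stand-in for a prior bdrv_getlength() result): walking an image's
 * allocation map with bdrv_block_status(), in the same spirit as the
 * bdrv_make_zero() loop above:
 *
 *     int64_t offset = 0, bytes;
 *     while (offset < total_size) {
 *         bytes = total_size - offset;
 *         int ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
 *         if (ret < 0) {
 *             break;
 *         }
 *         ... ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO) describes the range ...
 *         offset += bytes;
 *     }
 */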
2326 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2328 * Return 1 if (a prefix of) the given range is allocated in any image
2329 * between BASE and TOP (BASE is only included if include_base is set).
2330 * BASE can be NULL to check if the given offset is allocated in any
2331 * image of the chain. Return 0 otherwise, or negative errno on
 * failure.
2334 * 'pnum' is set to the number of bytes (including and immediately
2335 * following the specified offset) that are known to be in the same
2336 * allocated/unallocated state. Note that a subsequent call starting
2337 * at 'offset + *pnum' may return the same allocation status (in other
2338 * words, the result is not necessarily the maximum possible range);
2339 * but 'pnum' will only be 0 when end of file is reached.
2342 int bdrv_is_allocated_above(BlockDriverState *top,
2343 BlockDriverState *base,
2344 bool include_base, int64_t offset,
2345 int64_t bytes, int64_t *pnum)
2347 BlockDriverState *intermediate;
2351 assert(base || !include_base);
2354 while (include_base || intermediate != base) {
2358 assert(intermediate);
2359 ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
2368 size_inter = bdrv_getlength(intermediate);
2369 if (size_inter < 0) {
2372 if (n > pnum_inter &&
2373 (intermediate == top || offset + pnum_inter < size_inter)) {
2377 if (intermediate == base) {
2381 intermediate = backing_bs(intermediate);
2388 typedef struct BdrvVmstateCo {
2389 BlockDriverState *bs;
2390 QEMUIOVector *qiov;
2391 int64_t pos;
2392 bool is_read;
2393 int ret;
2394 } BdrvVmstateCo;
2396 static int coroutine_fn
2397 bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2398 bool is_read)
2399 {
2400 BlockDriver *drv = bs->drv;
2401 int ret = -ENOTSUP;
2403 bdrv_inc_in_flight(bs);
2405 if (!drv) {
2406 ret = -ENOMEDIUM;
2407 } else if (drv->bdrv_load_vmstate) {
2408 if (is_read) {
2409 ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2410 } else {
2411 ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2412 }
2413 } else if (bs->file) {
2414 ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
2415 }
2417 bdrv_dec_in_flight(bs);
2418 return ret;
2419 }
2421 static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
2422 {
2423 BdrvVmstateCo *co = opaque;
2424 co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
2425 aio_wait_kick();
2426 }
2428 static inline int
2429 bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2430 bool is_read)
2431 {
2432 if (qemu_in_coroutine()) {
2433 return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
2434 } else {
2435 BdrvVmstateCo data = {
2436 .bs = bs,
2437 .qiov = qiov,
2438 .pos = pos,
2439 .is_read = is_read,
2440 .ret = -EINPROGRESS,
2441 };
2442 Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
2444 bdrv_coroutine_enter(bs, co);
2445 BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
2446 return data.ret;
2447 }
2448 }
2450 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2451 int64_t pos, int size)
2452 {
2453 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2454 int ret;
2456 ret = bdrv_writev_vmstate(bs, &qiov, pos);
2457 if (ret < 0) {
2458 return ret;
2459 }
2461 return size;
2462 }
2464 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2465 {
2466 return bdrv_rw_vmstate(bs, qiov, pos, false);
2467 }
2469 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2470 int64_t pos, int size)
2471 {
2472 QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2473 int ret;
2475 ret = bdrv_readv_vmstate(bs, &qiov, pos);
2476 if (ret < 0) {
2477 return ret;
2478 }
2480 return size;
2481 }
2483 int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2484 {
2485 return bdrv_rw_vmstate(bs, qiov, pos, true);
2486 }
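/*
 * Editor's usage sketch: the migration layer stores the VM state blob
 * through these wrappers; a minimal (hypothetical) round trip looks like
 *
 *     uint8_t buf[4096];
 *     int ret = bdrv_save_vmstate(bs, buf, 0, sizeof(buf));
 *     ...
 *     ret = bdrv_load_vmstate(bs, buf, 0, sizeof(buf));
 *
 * Both return 'size' on success and a negative errno on failure; where the
 * data actually lands is driver-defined (e.g. qcow2 keeps it alongside
 * internal snapshots).
 */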
2488 /**************************************************************/
2491 void bdrv_aio_cancel(BlockAIOCB *acb)
2492 {
2493 qemu_aio_ref(acb);
2494 bdrv_aio_cancel_async(acb);
2495 while (acb->refcnt > 1) {
2496 if (acb->aiocb_info->get_aio_context) {
2497 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2498 } else if (acb->bs) {
2499 /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2500 * assert that we're not using an I/O thread. Thread-safe
2501 * code should use bdrv_aio_cancel_async exclusively.
2502 */
2503 assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2504 aio_poll(bdrv_get_aio_context(acb->bs), true);
2505 } else {
2506 abort();
2507 }
2508 }
2509 qemu_aio_unref(acb);
2510 }
2512 /* Async version of aio cancel. The caller is not blocked if the acb implements
2513 * cancel_async; otherwise we do nothing and let the request complete normally.
2514 * In either case the completion callback must be called. */
2515 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2516 {
2517 if (acb->aiocb_info->cancel_async) {
2518 acb->aiocb_info->cancel_async(acb);
2519 }
2520 }
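/*
 * Editor's note: thread-safe callers, e.g. code running in an iothread,
 * should prefer the async variant and rely on the completion callback:
 *
 *     bdrv_aio_cancel_async(acb);
 *
 * bdrv_aio_cancel() above is the blocking form and may only poll from the
 * main AioContext, as its assert documents.
 */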
2522 /**************************************************************/
2523 /* Coroutine block device emulation */
2525 typedef struct FlushCo {
2526 BlockDriverState *bs;
2527 int ret;
2528 } FlushCo;
2531 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2532 {
2533 FlushCo *rwco = opaque;
2535 rwco->ret = bdrv_co_flush(rwco->bs);
2536 aio_wait_kick();
2537 }
2539 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2540 {
2541 int current_gen;
2542 int ret = 0;
2544 bdrv_inc_in_flight(bs);
2546 if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2547 bdrv_is_sg(bs)) {
2548 goto early_exit;
2549 }
2551 qemu_co_mutex_lock(&bs->reqs_lock);
2552 current_gen = atomic_read(&bs->write_gen);
2554 /* Wait until any previous flushes are completed */
2555 while (bs->active_flush_req) {
2556 qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2557 }
2559 /* Flushes reach this point in nondecreasing current_gen order. */
2560 bs->active_flush_req = true;
2561 qemu_co_mutex_unlock(&bs->reqs_lock);
2563 /* Write back all layers by calling one driver function */
2564 if (bs->drv->bdrv_co_flush) {
2565 ret = bs->drv->bdrv_co_flush(bs);
2566 goto out;
2567 }
2569 /* Write back cached data to the OS even with cache=unsafe */
2570 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2571 if (bs->drv->bdrv_co_flush_to_os) {
2572 ret = bs->drv->bdrv_co_flush_to_os(bs);
2573 if (ret < 0) {
2574 goto out;
2575 }
2576 }
2578 /* But don't actually force it to the disk with cache=unsafe */
2579 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2580 goto flush_parent;
2581 }
2583 /* Check if we really need to flush anything */
2584 if (bs->flushed_gen == current_gen) {
2585 goto flush_parent;
2586 }
2588 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2590 /* bs->drv->bdrv_co_flush() might have ejected the BDS
2591 * (even in case of apparent success) */
2595 if (bs->drv->bdrv_co_flush_to_disk) {
2596 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2597 } else if (bs->drv->bdrv_aio_flush) {
2598 BlockAIOCB *acb;
2599 CoroutineIOCompletion co = {
2600 .coroutine = qemu_coroutine_self(),
2601 };
2603 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2604 if (acb == NULL) {
2605 ret = -EIO;
2606 } else {
2607 qemu_coroutine_yield();
2608 ret = co.ret;
2609 }
2610 } else {
2611 /*
2612 * Some block drivers always operate in either writethrough or unsafe
2613 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2614 * know how the server works (because the behaviour is hardcoded or
2615 * depends on server-side configuration), so we can't ensure that
2616 * everything is safe on disk. Returning an error doesn't work because
2617 * that would break guests even if the server operates in writethrough
2618 * mode.
2619 *
2620 * Let's hope the user knows what they're doing.
2621 */
2622 ret = 0;
2623 }
2625 if (ret < 0) {
2626 goto out;
2627 }
2628 flush_parent:
2629 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2630 * in the case of cache=unsafe, so there are no useless flushes.
2631 */
2633 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2634 out:
2635 /* Notify any pending flushes that we have completed */
2636 if (ret == 0) {
2637 bs->flushed_gen = current_gen;
2638 }
2640 qemu_co_mutex_lock(&bs->reqs_lock);
2641 bs->active_flush_req = false;
2642 /* Return value is ignored - it's ok if wait queue is empty */
2643 qemu_co_queue_next(&bs->flush_queue);
2644 qemu_co_mutex_unlock(&bs->reqs_lock);
2646 early_exit:
2647 bdrv_dec_in_flight(bs);
2648 return ret;
2649 }
2651 int bdrv_flush(BlockDriverState *bs)
2652 {
2653 Coroutine *co;
2654 FlushCo flush_co = {
2655 .bs = bs,
2656 .ret = NOT_DONE,
2657 };
2659 if (qemu_in_coroutine()) {
2660 /* Fast-path if already in coroutine context */
2661 bdrv_flush_co_entry(&flush_co);
2662 } else {
2663 co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
2664 bdrv_coroutine_enter(bs, co);
2665 BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
2666 }
2668 return flush_co.ret;
2669 }
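/*
 * Editor's usage sketch: a typical synchronous caller is simply
 *
 *     int ret = bdrv_flush(bs);
 *     if (ret < 0) {
 *         error_report("flush failed: %s", strerror(-ret));
 *     }
 *
 * Thanks to the write_gen/flushed_gen bookkeeping in bdrv_co_flush() above,
 * a second flush with no intervening writes returns without reaching the
 * driver (assuming the first one succeeded).
 */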
2671 typedef struct DiscardCo {
2672 BdrvChild *child;
2673 int64_t offset;
2674 int64_t bytes;
2675 int ret;
2676 } DiscardCo;
2677 static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
2678 {
2679 DiscardCo *rwco = opaque;
2681 rwco->ret = bdrv_co_pdiscard(rwco->child, rwco->offset, rwco->bytes);
2682 aio_wait_kick();
2683 }
2685 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
2686 int64_t bytes)
2687 {
2688 BdrvTrackedRequest req;
2689 int max_pdiscard, ret;
2690 int head, tail, align;
2691 BlockDriverState *bs = child->bs;
2693 if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
2694 return -ENOMEDIUM;
2695 }
2697 if (bdrv_has_readonly_bitmaps(bs)) {
2698 return -EPERM;
2699 }
2701 if (offset < 0 || bytes < 0 || bytes > INT64_MAX - offset) {
2702 return -EIO;
2703 }
2705 /* Do nothing if disabled. */
2706 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2707 return 0;
2708 }
2710 if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
2711 return 0;
2712 }
2714 /* Discard is advisory, but some devices track and coalesce
2715 * unaligned requests, so we must pass everything down rather than
2716 * round here. Still, most devices will just silently ignore
2717 * unaligned requests (by returning -ENOTSUP), so we must fragment
2718 * the request accordingly. */
2719 align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2720 assert(align % bs->bl.request_alignment == 0);
2721 head = offset % align;
2722 tail = (offset + bytes) % align;
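/*
 * Editor's worked example: with align = 64 KiB, offset = 60 KiB and
 * bytes = 200 KiB, we get head = 60 KiB and tail = (60 + 200) % 64 = 4 KiB.
 * The loop below then issues a 4 KiB request up to the first 64 KiB
 * boundary, aligned chunks for the middle 192 KiB, and a final 4 KiB tail
 * request.
 */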
2724 bdrv_inc_in_flight(bs);
2725 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
2727 ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
2728 if (ret < 0) {
2729 goto out;
2730 }
2732 max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
2733 align);
2734 assert(max_pdiscard >= bs->bl.request_alignment);
2736 while (bytes > 0) {
2737 int64_t num = bytes;
2739 if (head) {
2740 /* Make small requests to get to alignment boundaries. */
2741 num = MIN(bytes, align - head);
2742 if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
2743 num %= bs->bl.request_alignment;
2744 }
2745 head = (head + num) % align;
2746 assert(num < max_pdiscard);
2747 } else if (tail) {
2748 if (num > align) {
2749 /* Shorten the request to the last aligned cluster. */
2750 num -= tail;
2751 } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
2752 tail > bs->bl.request_alignment) {
2753 tail %= bs->bl.request_alignment;
2754 num -= tail;
2755 }
2756 }
2757 /* limit request size */
2758 if (num > max_pdiscard) {
2759 num = max_pdiscard;
2760 }
2762 if (!bs->drv) {
2763 ret = -ENOMEDIUM;
2764 goto out;
2765 }
2766 if (bs->drv->bdrv_co_pdiscard) {
2767 ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
2768 } else {
2769 BlockAIOCB *acb;
2770 CoroutineIOCompletion co = {
2771 .coroutine = qemu_coroutine_self(),
2772 };
2774 acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
2775 bdrv_co_io_em_complete, &co);
2776 if (acb == NULL) {
2777 ret = -EIO;
2778 goto out;
2779 } else {
2780 qemu_coroutine_yield();
2781 ret = co.ret;
2782 }
2783 }
2784 if (ret && ret != -ENOTSUP) {
2785 goto out;
2786 }
2788 offset += num;
2789 bytes -= num;
2790 }
2791 ret = 0;
2792 out:
2793 bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
2794 tracked_request_end(&req);
2795 bdrv_dec_in_flight(bs);
2796 return ret;
2797 }
2799 int bdrv_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes)
2800 {
2801 Coroutine *co;
2802 DiscardCo rwco = {
2803 .child = child,
2804 .offset = offset,
2805 .bytes = bytes,
2806 .ret = NOT_DONE,
2807 };
2809 if (qemu_in_coroutine()) {
2810 /* Fast-path if already in coroutine context */
2811 bdrv_pdiscard_co_entry(&rwco);
2812 } else {
2813 co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
2814 bdrv_coroutine_enter(child->bs, co);
2815 BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
2816 }
2818 return rwco.ret;
2819 }
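/*
 * Editor's usage sketch: guest TRIM/UNMAP normally arrives here through the
 * BlockBackend layer; a direct caller only needs
 *
 *     int ret = bdrv_pdiscard(child, offset, bytes);
 *     if (ret < 0) {
 *         handle the error
 *     }
 *
 * -ENOTSUP from the driver is swallowed in bdrv_co_pdiscard() above, and
 * because discard is advisory, success does not guarantee that subsequent
 * reads of the range return zeroes.
 */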
2821 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
2823 BlockDriver *drv = bs->drv;
2824 CoroutineIOCompletion co = {
2825 .coroutine = qemu_coroutine_self(),
2826 };
2827 BlockAIOCB *acb;
2829 bdrv_inc_in_flight(bs);
2830 if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
2831 co.ret = -ENOTSUP;
2832 goto out;
2833 }
2835 if (drv->bdrv_co_ioctl) {
2836 co.ret = drv->bdrv_co_ioctl(bs, req, buf);
2837 } else {
2838 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2839 if (!acb) {
2840 co.ret = -ENOTSUP;
2841 goto out;
2842 }
2843 qemu_coroutine_yield();
2844 }
2845 out:
2846 bdrv_dec_in_flight(bs);
2847 return co.ret;
2848 }
2850 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2852 return qemu_memalign(bdrv_opt_mem_align(bs), size);
2855 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2857 return memset(qemu_blockalign(bs, size), 0, size);
2860 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2862 size_t align = bdrv_opt_mem_align(bs);
2864 /* Ensure that NULL is never returned on success */
2865 assert(align > 0);
2866 if (size == 0) {
2867 size = align;
2868 }
2870 return qemu_try_memalign(align, size);
2871 }
2873 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2875 void *mem = qemu_try_blockalign(bs, size);
2877 if (mem) {
2878 memset(mem, 0, size);
2879 }
2881 return mem;
2882 }
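/*
 * Editor's usage sketch: bounce buffers meant to satisfy the device's
 * memory alignment (checked by bdrv_qiov_is_aligned() below) are allocated
 * with these helpers:
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);
 */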
2884 /**
2885 * Check if all memory in this vector is aligned to the device's minimum
2886 * memory alignment, as reported by bdrv_min_mem_align(). */
2887 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2890 size_t alignment = bdrv_min_mem_align(bs);
2892 for (i = 0; i < qiov->niov; i++) {
2893 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2894 return false;
2895 }
2896 if (qiov->iov[i].iov_len % alignment) {
2897 return false;
2898 }
2899 }
2901 return true;
2902 }
2904 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2905 NotifierWithReturn *notifier)
2907 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2910 void bdrv_io_plug(BlockDriverState *bs)
2914 QLIST_FOREACH(child, &bs->children, next) {
2915 bdrv_io_plug(child->bs);
2918 if (atomic_fetch_inc(&bs->io_plugged) == 0) {
2919 BlockDriver *drv = bs->drv;
2920 if (drv && drv->bdrv_io_plug) {
2921 drv->bdrv_io_plug(bs);
2926 void bdrv_io_unplug(BlockDriverState *bs)
2930 assert(bs->io_plugged);
2931 if (atomic_fetch_dec(&bs->io_plugged) == 1) {
2932 BlockDriver *drv = bs->drv;
2933 if (drv && drv->bdrv_io_unplug) {
2934 drv->bdrv_io_unplug(bs);
2938 QLIST_FOREACH(child, &bs->children, next) {
2939 bdrv_io_unplug(child->bs);
2940 }
2941 }
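/*
 * Editor's illustration: callers batch submissions inside a plug/unplug
 * pair (usually via the BlockBackend wrappers), roughly
 *
 *     bdrv_io_plug(bs);
 *     ... queue several requests ...
 *     bdrv_io_unplug(bs);
 *
 * so a driver such as linux-aio can flush the whole batch with a single
 * io_submit(). The io_plugged counter makes nested pairs safe.
 */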
2943 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
2947 if (bs->drv && bs->drv->bdrv_register_buf) {
2948 bs->drv->bdrv_register_buf(bs, host, size);
2950 QLIST_FOREACH(child, &bs->children, next) {
2951 bdrv_register_buf(child->bs, host, size);
2955 void bdrv_unregister_buf(BlockDriverState *bs, void *host)
2959 if (bs->drv && bs->drv->bdrv_unregister_buf) {
2960 bs->drv->bdrv_unregister_buf(bs, host);
2962 QLIST_FOREACH(child, &bs->children, next) {
2963 bdrv_unregister_buf(child->bs, host);
2964 }
2965 }
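/*
 * Editor's note (hypothetical example): these hooks let drivers pin or
 * pre-register a memory region once instead of per request, e.g.
 *
 *     bdrv_register_buf(bs, host_buf, buf_size);
 *     ... repeated I/O to and from host_buf ...
 *     bdrv_unregister_buf(bs, host_buf);
 *
 * Registration is only a hint; nodes whose drivers lack the callbacks
 * simply pass it down to their children.
 */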
2967 static int coroutine_fn bdrv_co_copy_range_internal(
2968 BdrvChild *src, uint64_t src_offset, BdrvChild *dst,
2969 uint64_t dst_offset, uint64_t bytes,
2970 BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
2971 bool recurse_src)
2972 {
2973 BdrvTrackedRequest req;
2974 int ret;
2976 /* TODO We can support BDRV_REQ_NO_FALLBACK here */
2977 assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
2978 assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
2980 if (!dst || !dst->bs) {
2981 return -ENOMEDIUM;
2982 }
2983 ret = bdrv_check_byte_request(dst->bs, dst_offset, bytes);
2984 if (ret) {
2985 return ret;
2986 }
2987 if (write_flags & BDRV_REQ_ZERO_WRITE) {
2988 return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
2989 }
2991 if (!src || !src->bs) {
2992 return -ENOMEDIUM;
2993 }
2994 ret = bdrv_check_byte_request(src->bs, src_offset, bytes);
2995 if (ret) {
2996 return ret;
2997 }
2999 if (!src->bs->drv->bdrv_co_copy_range_from
3000 || !dst->bs->drv->bdrv_co_copy_range_to
3001 || src->bs->encrypted || dst->bs->encrypted) {
3002 return -ENOTSUP;
3003 }
3005 if (recurse_src) {
3006 bdrv_inc_in_flight(src->bs);
3007 tracked_request_begin(&req, src->bs, src_offset, bytes,
3008 BDRV_TRACKED_READ);
3010 /* BDRV_REQ_SERIALISING is only for write operation */
3011 assert(!(read_flags & BDRV_REQ_SERIALISING));
3012 if (!(read_flags & BDRV_REQ_NO_SERIALISING)) {
3013 wait_serialising_requests(&req);
3014 }
3016 ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3017 src, src_offset,
3018 dst, dst_offset,
3019 bytes,
3020 read_flags, write_flags);
3022 tracked_request_end(&req);
3023 bdrv_dec_in_flight(src->bs);
3024 } else {
3025 bdrv_inc_in_flight(dst->bs);
3026 tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3027 BDRV_TRACKED_WRITE);
3028 ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3029 write_flags);
3030 if (!ret) {
3031 ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3032 src, src_offset,
3033 dst, dst_offset,
3034 bytes,
3035 read_flags, write_flags);
3036 }
3037 bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3038 tracked_request_end(&req);
3039 bdrv_dec_in_flight(dst->bs);
3040 }
3042 return ret;
3043 }
3045 /* Copy range from @src to @dst.
3047 * See the comment of bdrv_co_copy_range for the parameter and return value
3048 * semantics. */
3049 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
3050 BdrvChild *dst, uint64_t dst_offset,
3051 uint64_t bytes,
3052 BdrvRequestFlags read_flags,
3053 BdrvRequestFlags write_flags)
3055 trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3056 read_flags, write_flags);
3057 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3058 bytes, read_flags, write_flags, true);
3059 }
3061 /* Copy range from @src to @dst.
3063 * See the comment of bdrv_co_copy_range for the parameter and return value
3064 * semantics. */
3065 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
3066 BdrvChild *dst, uint64_t dst_offset,
3067 uint64_t bytes,
3068 BdrvRequestFlags read_flags,
3069 BdrvRequestFlags write_flags)
3071 trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3072 read_flags, write_flags);
3073 return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3074 bytes, read_flags, write_flags, false);
3075 }
3077 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
3078 BdrvChild *dst, uint64_t dst_offset,
3079 uint64_t bytes, BdrvRequestFlags read_flags,
3080 BdrvRequestFlags write_flags)
3081 {
3082 return bdrv_co_copy_range_from(src, src_offset,
3083 dst, dst_offset,
3084 bytes, read_flags, write_flags);
3085 }
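/*
 * Editor's usage sketch, from coroutine context: offloaded copy with a
 * bounce-buffer fallback, as a qemu-img convert style caller might do it:
 *
 *     ret = bdrv_co_copy_range(src, src_offset, dst, dst_offset, bytes, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         fall back to bdrv_co_preadv()/bdrv_co_pwritev() of the same range
 *     }
 *
 * -ENOTSUP is returned when either driver lacks the copy_range callbacks
 * (see bdrv_co_copy_range_internal() above).
 */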
3087 static void bdrv_parent_cb_resize(BlockDriverState *bs)
3088 {
3089 BdrvChild *c;
3090 QLIST_FOREACH(c, &bs->parents, next_parent) {
3091 if (c->role->resize) {
3092 c->role->resize(c);
3093 }
3094 }
3095 }
3098 * Truncate file to 'offset' bytes (needed only for file protocols)
3100 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset,
3101 PreallocMode prealloc, Error **errp)
3103 BlockDriverState *bs = child->bs;
3104 BlockDriver *drv = bs->drv;
3105 BdrvTrackedRequest req;
3106 int64_t old_size, new_bytes;
3107 int ret;
3110 /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3111 if (!drv) {
3112 error_setg(errp, "No medium inserted");
3113 return -ENOMEDIUM;
3114 }
3115 if (offset < 0) {
3116 error_setg(errp, "Image size cannot be negative");
3117 return -EINVAL;
3118 }
3120 old_size = bdrv_getlength(bs);
3121 if (old_size < 0) {
3122 error_setg_errno(errp, -old_size, "Failed to get old image size");
3123 return old_size;
3124 }
3126 if (offset > old_size) {
3127 new_bytes = offset - old_size;
3128 } else {
3129 new_bytes = 0;
3130 }
3132 bdrv_inc_in_flight(bs);
3133 tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3134 BDRV_TRACKED_TRUNCATE);
3136 /* If we are growing the image and potentially using preallocation for the
3137 * new area, we need to make sure that no write requests are made to it
3138 * concurrently or they might be overwritten by preallocation. */
3139 if (new_bytes) {
3140 mark_request_serialising(&req, 1);
3141 }
3142 if (bs->read_only) {
3143 error_setg(errp, "Image is read-only");
3144 ret = -EACCES;
3145 goto out;
3146 }
3147 ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3148 0);
3149 if (ret < 0) {
3150 error_setg_errno(errp, -ret,
3151 "Failed to prepare request for truncation");
3152 goto out;
3153 }
3155 if (!drv->bdrv_co_truncate) {
3156 if (bs->file && drv->is_filter) {
3157 ret = bdrv_co_truncate(bs->file, offset, prealloc, errp);
3158 goto out;
3159 }
3160 error_setg(errp, "Image format driver does not support resize");
3161 ret = -ENOTSUP;
3162 goto out;
3163 }
3165 ret = drv->bdrv_co_truncate(bs, offset, prealloc, errp);
3166 if (ret < 0) {
3167 goto out;
3168 }
3169 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3170 if (ret < 0) {
3171 error_setg_errno(errp, -ret, "Could not refresh total sector count");
3172 } else {
3173 offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3174 }
3175 /* It's possible that truncation succeeded but refresh_total_sectors
3176 * failed, but the latter doesn't affect how we should finish the request.
3177 * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3178 bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3179 out:
3181 tracked_request_end(&req);
3182 bdrv_dec_in_flight(bs);
3184 return ret;
3185 }
3187 typedef struct TruncateCo {
3188 BdrvChild *child;
3189 int64_t offset;
3190 PreallocMode prealloc;
3191 Error **errp;
3192 int ret;
3193 } TruncateCo;
3195 static void coroutine_fn bdrv_truncate_co_entry(void *opaque)
3196 {
3197 TruncateCo *tco = opaque;
3198 tco->ret = bdrv_co_truncate(tco->child, tco->offset, tco->prealloc,
3199 tco->errp);
3200 aio_wait_kick();
3201 }
3203 int bdrv_truncate(BdrvChild *child, int64_t offset, PreallocMode prealloc,
3204 Error **errp)
3205 {
3206 Coroutine *co;
3207 TruncateCo tco = {
3208 .child = child,
3209 .offset = offset,
3210 .prealloc = prealloc,
3211 .errp = errp,
3212 .ret = NOT_DONE,
3213 };
3215 if (qemu_in_coroutine()) {
3216 /* Fast-path if already in coroutine context */
3217 bdrv_truncate_co_entry(&tco);
3218 } else {
3219 co = qemu_coroutine_create(bdrv_truncate_co_entry, &tco);
3220 bdrv_coroutine_enter(child->bs, co);
3221 BDRV_POLL_WHILE(child->bs, tco.ret == NOT_DONE);
3222 }
3224 return tco.ret;
3225 }
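/*
 * Editor's usage sketch: growing an image with full preallocation and
 * reporting the error in the caller's context:
 *
 *     Error *local_err = NULL;
 *     int ret = bdrv_truncate(child, new_size, PREALLOC_MODE_FULL,
 *                             &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */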