/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
                                         int64_t sector_num,
                                         QEMUIOVector *qiov,
                                         int nb_sectors,
                                         BdrvRequestFlags flags,
                                         BlockCompletionFunc *cb,
                                         void *opaque,
                                         bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_begin) {
            c->role->drained_begin(c);
        }
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->role->drained_end) {
            c->role->drained_end(c);
        }
    }
}
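/* Merge the I/O limits of a child into those accumulated for its parent,
 * keeping the stricter value of each field (for the MIN_NON_ZERO fields a
 * value of zero means "no limit"). */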
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BlockDriver *drv = bs->drv;
    Error *local_err = NULL;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
    } else {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = getpagesize();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    if (bs->backing) {
        bdrv_refresh_limits(bs->backing->bs, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
    }
}
/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}
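/* Usage sketch (illustrative, not taken from a caller in this file): a user
 * that wants copy-on-read semantics brackets its work with the pair:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ...guest reads now also populate the top image...
 *     bdrv_disable_copy_on_read(bs);
 */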
/* Check if any requests are in-flight (including throttled requests) */
bool bdrv_requests_pending(BlockDriverState *bs)
{
    BdrvChild *child;

    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }

    QLIST_FOREACH(child, &bs->children, next) {
        if (bdrv_requests_pending(child->bs)) {
            return true;
        }
    }

    return false;
}
static void bdrv_drain_recurse(BlockDriverState *bs)
{
    BdrvChild *child;

    if (bs->drv && bs->drv->bdrv_drain) {
        bs->drv->bdrv_drain(bs);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_drain_recurse(child->bs);
    }
}
typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    QEMUBH *bh;
} BdrvCoDrainData;

static void bdrv_drain_poll(BlockDriverState *bs)
{
    bool busy = true;

    while (busy) {
        busy = bdrv_requests_pending(bs);
        busy |= aio_poll(bdrv_get_aio_context(bs), busy);
    }
}

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;

    qemu_bh_delete(data->bh);
    bdrv_drain_poll(data->bs);
    qemu_coroutine_enter(co, NULL);
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
    BdrvCoDrainData data;

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued from
     * qemu_co_queue_run_restart(). */
    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = qemu_coroutine_self(),
        .bs = bs,
        .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
    };
    qemu_bh_schedule(data.bh);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
}
void bdrv_drained_begin(BlockDriverState *bs)
{
    if (!bs->quiesce_counter++) {
        aio_disable_external(bdrv_get_aio_context(bs));
        bdrv_parent_drained_begin(bs);
    }

    bdrv_io_unplugged_begin(bs);
    bdrv_drain_recurse(bs);
    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs);
    } else {
        bdrv_drain_poll(bs);
    }
    bdrv_io_unplugged_end(bs);
}

void bdrv_drained_end(BlockDriverState *bs)
{
    assert(bs->quiesce_counter > 0);
    if (--bs->quiesce_counter > 0) {
        return;
    }

    bdrv_parent_drained_end(bs);
    aio_enable_external(bdrv_get_aio_context(bs));
}
/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend block driver's internal I/O until next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
 * AioContext.
 *
 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 * not depend on events in other AioContexts. In that case, use
 * bdrv_drain_all() instead.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
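/* Usage sketch (illustrative): callers that need the node quiescent for a
 * longer critical section use the begin/end pair directly rather than
 * bdrv_drain(), assuming they already hold the BlockDriverState's AioContext:
 *
 *     bdrv_drained_begin(bs);
 *     ...reconfigure the node or issue internal requests...
 *     bdrv_drained_end(bs);
 */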
/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 */
282 void bdrv_drain_all(void)
284 /* Always run first iteration so any pending completion BHs run */
286 BlockDriverState *bs;
288 BlockJob *job = NULL;
289 GSList *aio_ctxs = NULL, *ctx;
291 while ((job = block_job_next(job))) {
292 AioContext *aio_context = blk_get_aio_context(job->blk);
294 aio_context_acquire(aio_context);
295 block_job_pause(job);
296 aio_context_release(aio_context);
299 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
300 AioContext *aio_context = bdrv_get_aio_context(bs);
302 aio_context_acquire(aio_context);
303 bdrv_parent_drained_begin(bs);
304 bdrv_io_unplugged_begin(bs);
305 bdrv_drain_recurse(bs);
306 aio_context_release(aio_context);
308 if (!g_slist_find(aio_ctxs, aio_context)) {
309 aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
313 /* Note that completion of an asynchronous I/O operation can trigger any
314 * number of other I/O operations on other devices---for example a
315 * coroutine can submit an I/O request to another device in response to
316 * request completion. Therefore we must keep looping until there was no
317 * more activity rather than simply draining each device independently.
322 for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
323 AioContext *aio_context = ctx->data;
325 aio_context_acquire(aio_context);
326 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
327 if (aio_context == bdrv_get_aio_context(bs)) {
328 if (bdrv_requests_pending(bs)) {
330 aio_poll(aio_context, busy);
334 busy |= aio_poll(aio_context, false);
335 aio_context_release(aio_context);
339 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
340 AioContext *aio_context = bdrv_get_aio_context(bs);
342 aio_context_acquire(aio_context);
343 bdrv_io_unplugged_end(bs);
344 bdrv_parent_drained_end(bs);
345 aio_context_release(aio_context);
347 g_slist_free(aio_ctxs);
350 while ((job = block_job_next(job))) {
351 AioContext *aio_context = blk_get_aio_context(job->blk);
353 aio_context_acquire(aio_context);
354 block_job_resume(job);
355 aio_context_release(aio_context);
/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        req->bs->serialising_in_flight--;
    }

    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  unsigned int bytes,
                                  enum BdrvTrackedRequestType type)
{
    *req = (BdrvTrackedRequest){
        .bs             = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
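/* Widen the request's overlap window to @align and flag it as serialising so
 * that overlapping requests started afterwards wait for it to complete. */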
static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
                               - overlap_offset;

    if (!req->serialising) {
        req->bs->serialising_in_flight++;
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
/**
 * Round a region to cluster boundaries (sector-based)
 */
void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
                                    int64_t sector_num, int nb_sectors,
                                    int64_t *cluster_sector_num,
                                    int *cluster_nb_sectors)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_sector_num = sector_num;
        *cluster_nb_sectors = nb_sectors;
    } else {
        int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
        *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
        *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
                                            nb_sectors, c);
    }
}
/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, unsigned int bytes,
                            int64_t *cluster_offset,
                            unsigned int *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
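/* Worked example (illustrative): with a 64 KiB cluster size, a 4 KiB request
 * at byte offset 70 KiB is widened to cluster_offset = 64 KiB and
 * cluster_bytes = 64 KiB, so later copy-on-read I/O touches whole clusters
 * only. */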
static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, unsigned int bytes)
{
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
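/* Wait until no overlapping request that needs serialisation is still in
 * flight; returns true if this coroutine had to wait at least once. */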
static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    BdrvTrackedRequest *req;
    bool retry;
    bool waited = false;

    if (!bs->serialising_in_flight) {
        return false;
    }

    do {
        retry = false;
        QLIST_FOREACH(req, &bs->tracked_requests, list) {
            if (req == self || (!req->serialising && !self->serialising)) {
                continue;
            }
            if (tracked_request_overlaps(req, self->overlap_offset,
                                         self->overlap_bytes))
            {
                /* Hitting this means there was a reentrant request, for
                 * example, a block driver issuing nested requests. This must
                 * never happen since it means deadlock.
                 */
                assert(qemu_coroutine_self() != req->co);

                /* If the request is already (indirectly) waiting for us, or
                 * will wait for us as soon as it wakes up, then just go on
                 * (instead of producing a deadlock in the former case). */
                if (!req->waiting_for) {
                    self->waiting_for = req;
                    qemu_co_queue_wait(&req->wait_queue);
                    self->waiting_for = NULL;
                    retry = true;
                    waited = true;
                    break;
                }
            }
        }
    } while (retry);

    return waited;
}
526 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
529 if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
533 if (!bdrv_is_inserted(bs)) {
544 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
547 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
551 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
552 nb_sectors * BDRV_SECTOR_SIZE);
555 typedef struct RwCo {
561 BdrvRequestFlags flags;
564 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
568 if (!rwco->is_write) {
569 rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
570 rwco->qiov->size, rwco->qiov,
573 rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
574 rwco->qiov->size, rwco->qiov,
580 * Process a vectored synchronous request using coroutines
582 static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
583 QEMUIOVector *qiov, bool is_write,
584 BdrvRequestFlags flags)
591 .is_write = is_write,
596 if (qemu_in_coroutine()) {
597 /* Fast-path if already in coroutine context */
598 bdrv_rw_co_entry(&rwco);
600 AioContext *aio_context = bdrv_get_aio_context(child->bs);
602 co = qemu_coroutine_create(bdrv_rw_co_entry);
603 qemu_coroutine_enter(co, &rwco);
604 while (rwco.ret == NOT_DONE) {
605 aio_poll(aio_context, true);
612 * Process a synchronous request using coroutines
614 static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
615 int nb_sectors, bool is_write, BdrvRequestFlags flags)
619 .iov_base = (void *)buf,
620 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
623 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
627 qemu_iovec_init_external(&qiov, &iov, 1);
628 return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
629 &qiov, is_write, flags);
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BdrvChild *child, int64_t sector_num,
              uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}
/* Return < 0 if error. Important errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   No media inserted.
  -EINVAL      Invalid sector number or nb_sectors
  -EACCES      Trying to write a read-only device
*/
int bdrv_write(BdrvChild *child, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
651 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
652 int count, BdrvRequestFlags flags)
660 qemu_iovec_init_external(&qiov, &iov, 1);
661 return bdrv_prwv_co(child, offset, &qiov, true,
662 BDRV_REQ_ZERO_WRITE | flags);
666 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
667 * The operation is sped up by checking the block status and only writing
668 * zeroes to the device if they currently do not return zeroes. Optional
669 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
672 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
674 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
676 int64_t target_sectors, ret, nb_sectors, sector_num = 0;
677 BlockDriverState *bs = child->bs;
678 BlockDriverState *file;
681 target_sectors = bdrv_nb_sectors(bs);
682 if (target_sectors < 0) {
683 return target_sectors;
687 nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
688 if (nb_sectors <= 0) {
691 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
693 error_report("error getting block status at sector %" PRId64 ": %s",
694 sector_num, strerror(-ret));
697 if (ret & BDRV_BLOCK_ZERO) {
701 ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
702 n << BDRV_SECTOR_BITS, flags);
704 error_report("error writing zeroes at sector %" PRId64 ": %s",
705 sector_num, strerror(-ret));
712 int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
716 ret = bdrv_prwv_co(child, offset, qiov, false, 0);
724 int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
728 .iov_base = (void *)buf,
736 qemu_iovec_init_external(&qiov, &iov, 1);
737 return bdrv_preadv(child, offset, &qiov);
740 int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
744 ret = bdrv_prwv_co(child, offset, qiov, true, 0);
752 int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
756 .iov_base = (void *) buf,
764 qemu_iovec_init_external(&qiov, &iov, 1);
765 return bdrv_pwritev(child, offset, &qiov);
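/* Usage sketch (illustrative, assuming a valid BdrvChild *child): read 512
 * bytes at byte offset 4096 into a local buffer and write them back:
 *
 *     uint8_t buf[512];
 *     if (bdrv_pread(child, 4096, buf, sizeof(buf)) < 0 ||
 *         bdrv_pwrite(child, 4096, buf, sizeof(buf)) < 0) {
 *         ...handle the error...
 *     }
 */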
769 * Writes to the file and ensures that no writes are reordered across this
770 * request (acts as a barrier)
772 * Returns 0 on success, -errno in error cases.
774 int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
775 const void *buf, int count)
779 ret = bdrv_pwrite(child, offset, buf, count);
784 ret = bdrv_flush(child->bs);
typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->coroutine, NULL);
}
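/* The AIO-based fallbacks below funnel through CoroutineIOCompletion: the
 * calling coroutine submits an aio request with bdrv_co_io_em_complete() as
 * its completion callback, yields, and is re-entered with the result stored
 * in co.ret once the request finishes. */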
805 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
806 uint64_t offset, uint64_t bytes,
807 QEMUIOVector *qiov, int flags)
809 BlockDriver *drv = bs->drv;
811 unsigned int nb_sectors;
813 assert(!(flags & ~BDRV_REQ_MASK));
815 if (drv->bdrv_co_preadv) {
816 return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
819 sector_num = offset >> BDRV_SECTOR_BITS;
820 nb_sectors = bytes >> BDRV_SECTOR_BITS;
822 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
823 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
824 assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
826 if (drv->bdrv_co_readv) {
827 return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
830 CoroutineIOCompletion co = {
831 .coroutine = qemu_coroutine_self(),
834 acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
835 bdrv_co_io_em_complete, &co);
839 qemu_coroutine_yield();
845 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
846 uint64_t offset, uint64_t bytes,
847 QEMUIOVector *qiov, int flags)
849 BlockDriver *drv = bs->drv;
851 unsigned int nb_sectors;
854 assert(!(flags & ~BDRV_REQ_MASK));
856 if (drv->bdrv_co_pwritev) {
857 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
858 flags & bs->supported_write_flags);
859 flags &= ~bs->supported_write_flags;
863 sector_num = offset >> BDRV_SECTOR_BITS;
864 nb_sectors = bytes >> BDRV_SECTOR_BITS;
866 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
867 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
868 assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
870 if (drv->bdrv_co_writev_flags) {
871 ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
872 flags & bs->supported_write_flags);
873 flags &= ~bs->supported_write_flags;
874 } else if (drv->bdrv_co_writev) {
875 assert(!bs->supported_write_flags);
876 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
879 CoroutineIOCompletion co = {
880 .coroutine = qemu_coroutine_self(),
883 acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
884 bdrv_co_io_em_complete, &co);
888 qemu_coroutine_yield();
894 if (ret == 0 && (flags & BDRV_REQ_FUA)) {
895 ret = bdrv_co_flush(bs);
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file. This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer;

    BlockDriver *drv = bs->drv;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    int64_t cluster_offset;
    unsigned int cluster_bytes;
    size_t skip_bytes;
    int ret;
919 /* Cover entire cluster so no additional backing file I/O is required when
920 * allocating cluster in the image file.
922 bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
924 trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
925 cluster_offset, cluster_bytes);
927 iov.iov_len = cluster_bytes;
928 iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
929 if (bounce_buffer == NULL) {
934 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
936 ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
942 if (drv->bdrv_co_pwrite_zeroes &&
943 buffer_is_zero(bounce_buffer, iov.iov_len)) {
944 /* FIXME: Should we (perhaps conditionally) be setting
945 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
946 * that still correctly reads as zero? */
947 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
949 /* This does not change the data on the disk, it is not necessary
950 * to flush even in cache=writethrough mode.
952 ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
957 /* It might be okay to ignore write errors for guest requests. If this
958 * is a deliberate copy-on-read then we don't want to ignore the error.
959 * Simply report it in all cases.
964 skip_bytes = offset - cluster_offset;
965 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);
968 qemu_vfree(bounce_buffer);
/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read and zeroing after EOF; any other features must be
 * implemented by the caller.
 */
static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    int64_t total_bytes, max_bytes;
    int ret;
984 assert(is_power_of_2(align));
985 assert((offset & (align - 1)) == 0);
986 assert((bytes & (align - 1)) == 0);
987 assert(!qiov || bytes == qiov->size);
988 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
990 /* TODO: We would need a per-BDS .supported_read_flags and
991 * potential fallback support, if we ever implement any read flags
992 * to pass through to drivers. For now, there aren't any
993 * passthrough flags. */
994 assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
996 /* Handle Copy on Read and associated serialisation */
997 if (flags & BDRV_REQ_COPY_ON_READ) {
998 /* If we touch the same cluster it counts as an overlap. This
999 * guarantees that allocating writes will be serialized and not race
1000 * with each other for the same cluster. For example, in copy-on-read
1001 * it ensures that the CoR read and write operations are atomic and
1002 * guest writes cannot interleave between them. */
1003 mark_request_serialising(req, bdrv_get_cluster_size(bs));
1006 if (!(flags & BDRV_REQ_NO_SERIALISING)) {
1007 wait_serialising_requests(req);
1010 if (flags & BDRV_REQ_COPY_ON_READ) {
1011 int64_t start_sector = offset >> BDRV_SECTOR_BITS;
1012 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1013 unsigned int nb_sectors = end_sector - start_sector;
1016 ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
1021 if (!ret || pnum != nb_sectors) {
1022 ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
1027 /* Forward the request to the BlockDriver */
1028 total_bytes = bdrv_getlength(bs);
1029 if (total_bytes < 0) {
1034 max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1035 if (bytes <= max_bytes) {
1036 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
1037 } else if (max_bytes > 0) {
1038 QEMUIOVector local_qiov;
1040 qemu_iovec_init(&local_qiov, qiov->niov);
1041 qemu_iovec_concat(&local_qiov, qiov, 0, max_bytes);
1043 ret = bdrv_driver_preadv(bs, offset, max_bytes, &local_qiov, 0);
1045 qemu_iovec_destroy(&local_qiov);
1050 /* Reading beyond end of file is supposed to produce zeroes */
1051 if (ret == 0 && total_bytes < offset + bytes) {
1052 uint64_t zero_offset = MAX(0, total_bytes - offset);
1053 uint64_t zero_bytes = offset + bytes - zero_offset;
1054 qemu_iovec_memset(qiov, zero_offset, 0, zero_bytes);
1062 * Handle a read request in coroutine context
1064 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1065 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1066 BdrvRequestFlags flags)
1068 BlockDriverState *bs = child->bs;
1069 BlockDriver *drv = bs->drv;
1070 BdrvTrackedRequest req;
1072 uint64_t align = bs->bl.request_alignment;
1073 uint8_t *head_buf = NULL;
1074 uint8_t *tail_buf = NULL;
1075 QEMUIOVector local_qiov;
1076 bool use_local_qiov = false;
1083 ret = bdrv_check_byte_request(bs, offset, bytes);
1088 /* Don't do copy-on-read if we read data before write operation */
1089 if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
1090 flags |= BDRV_REQ_COPY_ON_READ;
1093 /* Align read if necessary by padding qiov */
1094 if (offset & (align - 1)) {
1095 head_buf = qemu_blockalign(bs, align);
1096 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1097 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1098 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1099 use_local_qiov = true;
1101 bytes += offset & (align - 1);
1102 offset = offset & ~(align - 1);
1105 if ((offset + bytes) & (align - 1)) {
1106 if (!use_local_qiov) {
1107 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1108 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1109 use_local_qiov = true;
1111 tail_buf = qemu_blockalign(bs, align);
1112 qemu_iovec_add(&local_qiov, tail_buf,
1113 align - ((offset + bytes) & (align - 1)));
1115 bytes = ROUND_UP(bytes, align);
1118 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1119 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
1120 use_local_qiov ? &local_qiov : qiov,
1122 tracked_request_end(&req);
1124 if (use_local_qiov) {
1125 qemu_iovec_destroy(&local_qiov);
1126 qemu_vfree(head_buf);
1127 qemu_vfree(tail_buf);
1133 static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
1134 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1135 BdrvRequestFlags flags)
1137 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1141 return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
1142 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1145 int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
1146 int nb_sectors, QEMUIOVector *qiov)
1148 trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);
1150 return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
1153 /* Maximum buffer for write zeroes fallback, in bytes */
1154 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
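/* i.e. 32768 sectors of 512 bytes: the zero-writing fallback below never
 * allocates a bounce buffer larger than 16 MiB */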
1156 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1157 int64_t offset, int count, BdrvRequestFlags flags)
1159 BlockDriver *drv = bs->drv;
1161 struct iovec iov = {0};
1163 bool need_flush = false;
1167 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1168 int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1169 bs->bl.request_alignment);
1171 assert(is_power_of_2(alignment));
1172 head = offset & (alignment - 1);
1173 tail = (offset + count) & (alignment - 1);
1174 max_write_zeroes &= ~(alignment - 1);
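/* Worked example (illustrative, assuming no driver limit caps the middle
 * chunk): with a 4096-byte alignment, offset 4000 and count 10000 give
 * head = 4000 and tail = 1712, so the loop below issues a 96-byte chunk up
 * to the first aligned offset, an aligned 8192-byte middle chunk, and a
 * final 1712-byte tail chunk. */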
1176 while (count > 0 && !ret) {
1179 /* Align request. Block drivers can expect the "bulk" of the request
1180 * to be aligned, and that unaligned requests do not cross cluster
1184 /* Make a small request up to the first aligned sector. */
1185 num = MIN(count, alignment - head);
1187 } else if (tail && num > alignment) {
1188 /* Shorten the request to the last aligned sector. */
1192 /* limit request size */
1193 if (num > max_write_zeroes) {
1194 num = max_write_zeroes;
1198 /* First try the efficient write zeroes operation */
1199 if (drv->bdrv_co_pwrite_zeroes) {
1200 ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1201 flags & bs->supported_zero_flags);
1202 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1203 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1207 assert(!bs->supported_zero_flags);
1210 if (ret == -ENOTSUP) {
1211 /* Fall back to bounce buffer if write zeroes is unsupported */
1212 int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1213 MAX_WRITE_ZEROES_BOUNCE_BUFFER);
1214 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1216 if ((flags & BDRV_REQ_FUA) &&
1217 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1218 /* No need for bdrv_driver_pwrite() to do a fallback
1219 * flush on each chunk; use just one at the end */
1220 write_flags &= ~BDRV_REQ_FUA;
1223 num = MIN(num, max_transfer);
1225 if (iov.iov_base == NULL) {
1226 iov.iov_base = qemu_try_blockalign(bs, num);
1227 if (iov.iov_base == NULL) {
1231 memset(iov.iov_base, 0, num);
1233 qemu_iovec_init_external(&qiov, &iov, 1);
1235 ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
1237 /* Keep bounce buffer around if it is big enough for all
1238 * all future requests.
1240 if (num < max_transfer) {
1241 qemu_vfree(iov.iov_base);
1242 iov.iov_base = NULL;
1251 if (ret == 0 && need_flush) {
1252 ret = bdrv_co_flush(bs);
1254 qemu_vfree(iov.iov_base);
/*
 * Forwards an already correctly aligned write request to the BlockDriver.
 */
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
    int64_t align, QEMUIOVector *qiov, int flags)
{
    BlockDriver *drv = bs->drv;
    bool waited;
    int ret;
1269 int64_t start_sector = offset >> BDRV_SECTOR_BITS;
1270 int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1272 assert(is_power_of_2(align));
1273 assert((offset & (align - 1)) == 0);
1274 assert((bytes & (align - 1)) == 0);
1275 assert(!qiov || bytes == qiov->size);
1276 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1277 assert(!(flags & ~BDRV_REQ_MASK));
1279 waited = wait_serialising_requests(req);
1280 assert(!waited || !req->serialising);
1281 assert(req->overlap_offset <= offset);
1282 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1284 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1286 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1287 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
1288 qemu_iovec_is_zero(qiov)) {
1289 flags |= BDRV_REQ_ZERO_WRITE;
1290 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1291 flags |= BDRV_REQ_MAY_UNMAP;
1296 /* Do nothing, write notifier decided to fail this request */
1297 } else if (flags & BDRV_REQ_ZERO_WRITE) {
1298 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1299 ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
1301 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1302 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
1304 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1306 bdrv_set_dirty(bs, start_sector, end_sector - start_sector);
1308 if (bs->wr_highest_offset < offset + bytes) {
1309 bs->wr_highest_offset = offset + bytes;
1313 bs->total_sectors = MAX(bs->total_sectors, end_sector);
1319 static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
1322 BdrvRequestFlags flags,
1323 BdrvTrackedRequest *req)
1325 uint8_t *buf = NULL;
1326 QEMUIOVector local_qiov;
1328 uint64_t align = bs->bl.request_alignment;
1329 unsigned int head_padding_bytes, tail_padding_bytes;
1332 head_padding_bytes = offset & (align - 1);
1333 tail_padding_bytes = align - ((offset + bytes) & (align - 1));
1336 assert(flags & BDRV_REQ_ZERO_WRITE);
1337 if (head_padding_bytes || tail_padding_bytes) {
1338 buf = qemu_blockalign(bs, align);
1339 iov = (struct iovec) {
1343 qemu_iovec_init_external(&local_qiov, &iov, 1);
1345 if (head_padding_bytes) {
1346 uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1348 /* RMW the unaligned part before head. */
1349 mark_request_serialising(req, align);
1350 wait_serialising_requests(req);
1351 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1352 ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
1353 align, &local_qiov, 0);
1357 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1359 memset(buf + head_padding_bytes, 0, zero_bytes);
1360 ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
1362 flags & ~BDRV_REQ_ZERO_WRITE);
1366 offset += zero_bytes;
1367 bytes -= zero_bytes;
1370 assert(!bytes || (offset & (align - 1)) == 0);
1371 if (bytes >= align) {
1372 /* Write the aligned part in the middle. */
1373 uint64_t aligned_bytes = bytes & ~(align - 1);
1374 ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align,
1379 bytes -= aligned_bytes;
1380 offset += aligned_bytes;
1383 assert(!bytes || (offset & (align - 1)) == 0);
1385 assert(align == tail_padding_bytes + bytes);
1386 /* RMW the unaligned part after tail. */
1387 mark_request_serialising(req, align);
1388 wait_serialising_requests(req);
1389 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1390 ret = bdrv_aligned_preadv(bs, req, offset, align,
1391 align, &local_qiov, 0);
1395 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1397 memset(buf, 0, bytes);
1398 ret = bdrv_aligned_pwritev(bs, req, offset, align, align,
1399 &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1408 * Handle a write request in coroutine context
1410 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
1411 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1412 BdrvRequestFlags flags)
1414 BlockDriverState *bs = child->bs;
1415 BdrvTrackedRequest req;
1416 uint64_t align = bs->bl.request_alignment;
1417 uint8_t *head_buf = NULL;
1418 uint8_t *tail_buf = NULL;
1419 QEMUIOVector local_qiov;
1420 bool use_local_qiov = false;
1426 if (bs->read_only) {
1429 assert(!(bs->open_flags & BDRV_O_INACTIVE));
1431 ret = bdrv_check_byte_request(bs, offset, bytes);
1437 * Align write if necessary by performing a read-modify-write cycle.
1438 * Pad qiov with the read parts and be sure to have a tracked request not
1439 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1441 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1444 ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
1448 if (offset & (align - 1)) {
1449 QEMUIOVector head_qiov;
1450 struct iovec head_iov;
1452 mark_request_serialising(&req, align);
1453 wait_serialising_requests(&req);
1455 head_buf = qemu_blockalign(bs, align);
1456 head_iov = (struct iovec) {
1457 .iov_base = head_buf,
1460 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1462 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1463 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
1464 align, &head_qiov, 0);
1468 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1470 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1471 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1472 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1473 use_local_qiov = true;
1475 bytes += offset & (align - 1);
1476 offset = offset & ~(align - 1);
1478 /* We have read the tail already if the request is smaller
1479 * than one aligned block.
1481 if (bytes < align) {
1482 qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
1487 if ((offset + bytes) & (align - 1)) {
1488 QEMUIOVector tail_qiov;
1489 struct iovec tail_iov;
1493 mark_request_serialising(&req, align);
1494 waited = wait_serialising_requests(&req);
1495 assert(!waited || !use_local_qiov);
1497 tail_buf = qemu_blockalign(bs, align);
1498 tail_iov = (struct iovec) {
1499 .iov_base = tail_buf,
1502 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1504 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1505 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
1506 align, &tail_qiov, 0);
1510 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1512 if (!use_local_qiov) {
1513 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1514 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1515 use_local_qiov = true;
1518 tail_bytes = (offset + bytes) & (align - 1);
1519 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1521 bytes = ROUND_UP(bytes, align);
1524 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align,
1525 use_local_qiov ? &local_qiov : qiov,
1530 if (use_local_qiov) {
1531 qemu_iovec_destroy(&local_qiov);
1533 qemu_vfree(head_buf);
1534 qemu_vfree(tail_buf);
1536 tracked_request_end(&req);
1540 static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
1541 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1542 BdrvRequestFlags flags)
1544 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1548 return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
1549 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1552 int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
1553 int nb_sectors, QEMUIOVector *qiov)
1555 trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);
1557 return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
1560 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
1561 int count, BdrvRequestFlags flags)
1563 trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);
1565 if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
1566 flags &= ~BDRV_REQ_MAY_UNMAP;
1569 return bdrv_co_pwritev(child, offset, count, NULL,
1570 BDRV_REQ_ZERO_WRITE | flags);
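/* Usage sketch (illustrative): from coroutine context, zero out the first
 * 1 MiB of the image, allowing the driver to unmap it if it can:
 *
 *     ret = bdrv_co_pwrite_zeroes(child, 0, 1 * 1024 * 1024,
 *                                 BDRV_REQ_MAY_UNMAP);
 */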
typedef struct BdrvCoGetBlockStatusData {
    BlockDriverState *bs;
    BlockDriverState *base;
    BlockDriverState **file;
    int64_t sector_num;
    int nb_sectors;
    int *pnum;
    int64_t ret;
    bool done;
} BdrvCoGetBlockStatusData;
1585 * Returns the allocation status of the specified sectors.
1586 * Drivers not implementing the functionality are assumed to not support
1587 * backing files, hence all their sectors are reported as allocated.
1589 * If 'sector_num' is beyond the end of the disk image the return value is 0
1590 * and 'pnum' is set to 0.
1592 * 'pnum' is set to the number of sectors (including and immediately following
1593 * the specified sector) that are known to be in the same
1594 * allocated/unallocated state.
1596 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
1597 * beyond the end of the disk image it will be clamped.
1599 * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
1600 * points to the BDS which the sector range is allocated in.
1602 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
1604 int nb_sectors, int *pnum,
1605 BlockDriverState **file)
1607 int64_t total_sectors;
1611 total_sectors = bdrv_nb_sectors(bs);
1612 if (total_sectors < 0) {
1613 return total_sectors;
1616 if (sector_num >= total_sectors) {
1621 n = total_sectors - sector_num;
1622 if (n < nb_sectors) {
1626 if (!bs->drv->bdrv_co_get_block_status) {
1628 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1629 if (bs->drv->protocol_name) {
1630 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
1636 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
1643 if (ret & BDRV_BLOCK_RAW) {
1644 assert(ret & BDRV_BLOCK_OFFSET_VALID);
1645 return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
1649 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
1650 ret |= BDRV_BLOCK_ALLOCATED;
1652 if (bdrv_unallocated_blocks_are_zero(bs)) {
1653 ret |= BDRV_BLOCK_ZERO;
1654 } else if (bs->backing) {
1655 BlockDriverState *bs2 = bs->backing->bs;
1656 int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
1657 if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
1658 ret |= BDRV_BLOCK_ZERO;
1663 if (*file && *file != bs &&
1664 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
1665 (ret & BDRV_BLOCK_OFFSET_VALID)) {
1666 BlockDriverState *file2;
1669 ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
1670 *pnum, &file_pnum, &file2);
1672 /* Ignore errors. This is just providing extra information, it
1673 * is useful but not necessary.
1676 /* !file_pnum indicates an offset at or beyond the EOF; it is
1677 * perfectly valid for the format block driver to point to such
1678 * offsets, so catch it and mark everything as zero */
1679 ret |= BDRV_BLOCK_ZERO;
1681 /* Limit request to the range reported by the protocol driver */
1683 ret |= (ret2 & BDRV_BLOCK_ZERO);
1691 static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1692 BlockDriverState *base,
1696 BlockDriverState **file)
1698 BlockDriverState *p;
1702 for (p = bs; p != base; p = backing_bs(p)) {
1703 ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
1704 if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
1707 /* [sector_num, pnum] unallocated on this layer, which could be only
1708 * the first part of [sector_num, nb_sectors]. */
1709 nb_sectors = MIN(nb_sectors, *pnum);
1714 /* Coroutine wrapper for bdrv_get_block_status_above() */
1715 static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
1717 BdrvCoGetBlockStatusData *data = opaque;
1719 data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1728 * Synchronous wrapper around bdrv_co_get_block_status_above().
1730 * See bdrv_co_get_block_status_above() for details.
1732 int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1733 BlockDriverState *base,
1735 int nb_sectors, int *pnum,
1736 BlockDriverState **file)
1739 BdrvCoGetBlockStatusData data = {
1743 .sector_num = sector_num,
1744 .nb_sectors = nb_sectors,
1749 if (qemu_in_coroutine()) {
1750 /* Fast-path if already in coroutine context */
1751 bdrv_get_block_status_above_co_entry(&data);
1753 AioContext *aio_context = bdrv_get_aio_context(bs);
1755 co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
1756 qemu_coroutine_enter(co, &data);
1757 while (!data.done) {
1758 aio_poll(aio_context, true);
1764 int64_t bdrv_get_block_status(BlockDriverState *bs,
1766 int nb_sectors, int *pnum,
1767 BlockDriverState **file)
1769 return bdrv_get_block_status_above(bs, backing_bs(bs),
1770 sector_num, nb_sectors, pnum, file);
1773 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
1774 int nb_sectors, int *pnum)
1776 BlockDriverState *file;
1777 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
1782 return !!(ret & BDRV_BLOCK_ALLOCATED);
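/* Usage sketch (illustrative): test whether the first sector of an image is
 * backed by data in this layer:
 *
 *     int pnum;
 *     if (bdrv_is_allocated(bs, 0, 1, &pnum) > 0) {
 *         ...sector 0 is allocated here, as are the next pnum - 1 sectors...
 *     }
 */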
1786 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1788 * Return true if the given sector is allocated in any image between
1789 * BASE and TOP (inclusive). BASE can be NULL to check if the given
1790 * sector is allocated in any image of the chain. Return false otherwise.
1792 * 'pnum' is set to the number of sectors (including and immediately following
1793 * the specified sector) that are known to be in the same
1794 * allocated/unallocated state.
1797 int bdrv_is_allocated_above(BlockDriverState *top,
1798 BlockDriverState *base,
1800 int nb_sectors, int *pnum)
1802 BlockDriverState *intermediate;
1803 int ret, n = nb_sectors;
1806 while (intermediate && intermediate != base) {
1808 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
1818 * [sector_num, nb_sectors] is unallocated on top but intermediate
1821 * [sector_num+x, nr_sectors] allocated.
1823 if (n > pnum_inter &&
1824 (intermediate == top ||
1825 sector_num + pnum_inter < intermediate->total_sectors)) {
1829 intermediate = backing_bs(intermediate);
1836 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
1837 const uint8_t *buf, int nb_sectors)
1839 BlockDriver *drv = bs->drv;
1845 if (!drv->bdrv_write_compressed) {
1848 ret = bdrv_check_request(bs, sector_num, nb_sectors);
1853 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
1855 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
1858 typedef struct BdrvVmstateCo {
1859 BlockDriverState *bs;
1866 static int coroutine_fn
1867 bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
1870 BlockDriver *drv = bs->drv;
1874 } else if (drv->bdrv_load_vmstate) {
1875 return is_read ? drv->bdrv_load_vmstate(bs, qiov, pos)
1876 : drv->bdrv_save_vmstate(bs, qiov, pos);
1877 } else if (bs->file) {
1878 return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
1884 static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
1886 BdrvVmstateCo *co = opaque;
1887 co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
1891 bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
1894 if (qemu_in_coroutine()) {
1895 return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
1897 BdrvVmstateCo data = {
1902 .ret = -EINPROGRESS,
1904 Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry);
1906 qemu_coroutine_enter(co, &data);
1907 while (data.ret == -EINPROGRESS) {
1908 aio_poll(bdrv_get_aio_context(bs), true);
1914 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
1915 int64_t pos, int size)
1918 struct iovec iov = {
1919 .iov_base = (void *) buf,
1924 qemu_iovec_init_external(&qiov, &iov, 1);
1926 ret = bdrv_writev_vmstate(bs, &qiov, pos);
1934 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
1936 return bdrv_rw_vmstate(bs, qiov, pos, false);
1939 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1940 int64_t pos, int size)
1943 struct iovec iov = {
1949 qemu_iovec_init_external(&qiov, &iov, 1);
1950 ret = bdrv_readv_vmstate(bs, &qiov, pos);
1958 int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
1960 return bdrv_rw_vmstate(bs, qiov, pos, true);
1963 /**************************************************************/
1966 BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
1967 QEMUIOVector *qiov, int nb_sectors,
1968 BlockCompletionFunc *cb, void *opaque)
1970 trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);
1972 return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0,
1976 BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
1977 QEMUIOVector *qiov, int nb_sectors,
1978 BlockCompletionFunc *cb, void *opaque)
1980 trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);
1982 return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0,
1986 void bdrv_aio_cancel(BlockAIOCB *acb)
1989 bdrv_aio_cancel_async(acb);
1990 while (acb->refcnt > 1) {
1991 if (acb->aiocb_info->get_aio_context) {
1992 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
1993 } else if (acb->bs) {
1994 aio_poll(bdrv_get_aio_context(acb->bs), true);
1999 qemu_aio_unref(acb);
2002 /* Async version of aio cancel. The caller is not blocked if the acb implements
2003 * cancel_async, otherwise we do nothing and let the request normally complete.
2004 * In either case the completion callback must be called. */
2005 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2007 if (acb->aiocb_info->cancel_async) {
2008 acb->aiocb_info->cancel_async(acb);
2012 /**************************************************************/
2013 /* async block device emulation */
2015 typedef struct BlockRequest {
2017 /* Used during read, write, trim */
2024 /* Used during ioctl */
2030 BlockCompletionFunc *cb;
2036 typedef struct BlockAIOCBCoroutine {
2044 } BlockAIOCBCoroutine;
static const AIOCBInfo bdrv_em_co_aiocb_info = {
    .aiocb_size         = sizeof(BlockAIOCBCoroutine),
};
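/* need_bh guards against completing a request before its submission function
 * has returned: bdrv_co_complete() only invokes the user callback once
 * need_bh has been cleared, and bdrv_co_maybe_schedule_bh() defers an
 * already-finished request to a bottom half instead of completing it in
 * place. */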
2050 static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
2052 if (!acb->need_bh) {
2053 acb->common.cb(acb->common.opaque, acb->req.error);
2054 qemu_aio_unref(acb);
2058 static void bdrv_co_em_bh(void *opaque)
2060 BlockAIOCBCoroutine *acb = opaque;
2062 assert(!acb->need_bh);
2063 qemu_bh_delete(acb->bh);
2064 bdrv_co_complete(acb);
2067 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
2069 acb->need_bh = false;
2070 if (acb->req.error != -EINPROGRESS) {
2071 BlockDriverState *bs = acb->common.bs;
2073 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
2074 qemu_bh_schedule(acb->bh);
2078 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
2079 static void coroutine_fn bdrv_co_do_rw(void *opaque)
2081 BlockAIOCBCoroutine *acb = opaque;
2083 if (!acb->is_write) {
2084 acb->req.error = bdrv_co_do_readv(acb->child, acb->req.sector,
2085 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
2087 acb->req.error = bdrv_co_do_writev(acb->child, acb->req.sector,
2088 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
2091 bdrv_co_complete(acb);
2094 static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
2098 BdrvRequestFlags flags,
2099 BlockCompletionFunc *cb,
2104 BlockAIOCBCoroutine *acb;
2106 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
2108 acb->need_bh = true;
2109 acb->req.error = -EINPROGRESS;
2110 acb->req.sector = sector_num;
2111 acb->req.nb_sectors = nb_sectors;
2112 acb->req.qiov = qiov;
2113 acb->req.flags = flags;
2114 acb->is_write = is_write;
2116 co = qemu_coroutine_create(bdrv_co_do_rw);
2117 qemu_coroutine_enter(co, acb);
2119 bdrv_co_maybe_schedule_bh(acb);
2120 return &acb->common;
2123 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
2125 BlockAIOCBCoroutine *acb = opaque;
2126 BlockDriverState *bs = acb->common.bs;
2128 acb->req.error = bdrv_co_flush(bs);
2129 bdrv_co_complete(acb);
2132 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2133 BlockCompletionFunc *cb, void *opaque)
2135 trace_bdrv_aio_flush(bs, opaque);
2138 BlockAIOCBCoroutine *acb;
2140 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2141 acb->need_bh = true;
2142 acb->req.error = -EINPROGRESS;
2144 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
2145 qemu_coroutine_enter(co, acb);
2147 bdrv_co_maybe_schedule_bh(acb);
2148 return &acb->common;
2151 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
2153 BlockAIOCBCoroutine *acb = opaque;
2154 BlockDriverState *bs = acb->common.bs;
2156 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
2157 bdrv_co_complete(acb);
2160 BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
2161 int64_t sector_num, int nb_sectors,
2162 BlockCompletionFunc *cb, void *opaque)
2165 BlockAIOCBCoroutine *acb;
2167 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
2169 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2170 acb->need_bh = true;
2171 acb->req.error = -EINPROGRESS;
2172 acb->req.sector = sector_num;
2173 acb->req.nb_sectors = nb_sectors;
2174 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
2175 qemu_coroutine_enter(co, acb);
2177 bdrv_co_maybe_schedule_bh(acb);
2178 return &acb->common;
2181 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
2182 BlockCompletionFunc *cb, void *opaque)
2186 acb = g_malloc(aiocb_info->aiocb_size);
2187 acb->aiocb_info = aiocb_info;
2190 acb->opaque = opaque;
2195 void qemu_aio_ref(void *p)
2197 BlockAIOCB *acb = p;
2201 void qemu_aio_unref(void *p)
2203 BlockAIOCB *acb = p;
2204 assert(acb->refcnt > 0);
2205 if (--acb->refcnt == 0) {
2210 /**************************************************************/
2211 /* Coroutine block device emulation */
2213 typedef struct FlushCo {
2214 BlockDriverState *bs;
2219 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2221 FlushCo *rwco = opaque;
2223 rwco->ret = bdrv_co_flush(rwco->bs);
2226 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2229 BdrvTrackedRequest req;
2231 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2236 tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
2238 /* Write back all layers by calling one driver function */
2239 if (bs->drv->bdrv_co_flush) {
2240 ret = bs->drv->bdrv_co_flush(bs);
2244 /* Write back cached data to the OS even with cache=unsafe */
2245 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2246 if (bs->drv->bdrv_co_flush_to_os) {
2247 ret = bs->drv->bdrv_co_flush_to_os(bs);
2253 /* But don't actually force it to the disk with cache=unsafe */
2254 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2258 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2259 if (bs->drv->bdrv_co_flush_to_disk) {
2260 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2261 } else if (bs->drv->bdrv_aio_flush) {
2263 CoroutineIOCompletion co = {
2264 .coroutine = qemu_coroutine_self(),
2267 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2271 qemu_coroutine_yield();
2276 * Some block drivers always operate in either writethrough or unsafe
2277 * mode and don't support bdrv_flush therefore. Usually qemu doesn't
2278 * know how the server works (because the behaviour is hardcoded or
2279 * depends on server-side configuration), so we can't ensure that
2280 * everything is safe on disk. Returning an error doesn't work because
2281 * that would break guests even if the server operates in writethrough
2284 * Let's hope the user knows what he's doing.
2292 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2293 * in the case of cache=unsafe, so there are no useless flushes.
2296 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2298 tracked_request_end(&req);
2302 int bdrv_flush(BlockDriverState *bs)
2305 FlushCo flush_co = {
2310 if (qemu_in_coroutine()) {
2311 /* Fast-path if already in coroutine context */
2312 bdrv_flush_co_entry(&flush_co);
2314 AioContext *aio_context = bdrv_get_aio_context(bs);
2316 co = qemu_coroutine_create(bdrv_flush_co_entry);
2317 qemu_coroutine_enter(co, &flush_co);
2318 while (flush_co.ret == NOT_DONE) {
2319 aio_poll(aio_context, true);
2323 return flush_co.ret;
2326 typedef struct DiscardCo {
2327 BlockDriverState *bs;
2332 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
2334 DiscardCo *rwco = opaque;
2336 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
2339 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
2342 BdrvTrackedRequest req;
2343 int max_discard, ret;
2349 ret = bdrv_check_request(bs, sector_num, nb_sectors);
2352 } else if (bs->read_only) {
2355 assert(!(bs->open_flags & BDRV_O_INACTIVE));
2357 /* Do nothing if disabled. */
2358 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2362 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
2366 tracked_request_begin(&req, bs, sector_num << BDRV_SECTOR_BITS,
2367 nb_sectors << BDRV_SECTOR_BITS, BDRV_TRACKED_DISCARD);
2369 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
2374 max_discard = MIN_NON_ZERO(bs->bl.max_pdiscard >> BDRV_SECTOR_BITS,
2375 BDRV_REQUEST_MAX_SECTORS);
2376 while (nb_sectors > 0) {
2378 int num = nb_sectors;
2379 int discard_alignment = bs->bl.pdiscard_alignment >> BDRV_SECTOR_BITS;
2382 if (discard_alignment &&
2383 num >= discard_alignment &&
2384 sector_num % discard_alignment) {
2385 if (num > discard_alignment) {
2386 num = discard_alignment;
2388 num -= sector_num % discard_alignment;
2391 /* limit request size */
2392 if (num > max_discard) {
2396 if (bs->drv->bdrv_co_discard) {
2397 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
2400 CoroutineIOCompletion co = {
2401 .coroutine = qemu_coroutine_self(),
2404 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
2405 bdrv_co_io_em_complete, &co);
2410 qemu_coroutine_yield();
2414 if (ret && ret != -ENOTSUP) {
2423 bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
2424 req.bytes >> BDRV_SECTOR_BITS);
2425 tracked_request_end(&req);
2429 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
2434 .sector_num = sector_num,
2435 .nb_sectors = nb_sectors,
2439 if (qemu_in_coroutine()) {
2440 /* Fast-path if already in coroutine context */
2441 bdrv_discard_co_entry(&rwco);
2443 AioContext *aio_context = bdrv_get_aio_context(bs);
2445 co = qemu_coroutine_create(bdrv_discard_co_entry);
2446 qemu_coroutine_enter(co, &rwco);
2447 while (rwco.ret == NOT_DONE) {
2448 aio_poll(aio_context, true);
2455 static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
2457 BlockDriver *drv = bs->drv;
2458 BdrvTrackedRequest tracked_req;
2459 CoroutineIOCompletion co = {
2460 .coroutine = qemu_coroutine_self(),
2464 tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
2465 if (!drv || !drv->bdrv_aio_ioctl) {
2470 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2475 qemu_coroutine_yield();
2477 tracked_request_end(&tracked_req);
2482 BlockDriverState *bs;
2488 static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
2490 BdrvIoctlCoData *data = opaque;
2491 data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
2494 /* needed for generic scsi interface */
2495 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
2497 BdrvIoctlCoData data = {
2501 .ret = -EINPROGRESS,
2504 if (qemu_in_coroutine()) {
2505 /* Fast-path if already in coroutine context */
2506 bdrv_co_ioctl_entry(&data);
2508 Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);
2510 qemu_coroutine_enter(co, &data);
2511 while (data.ret == -EINPROGRESS) {
2512 aio_poll(bdrv_get_aio_context(bs), true);
2518 static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
2520 BlockAIOCBCoroutine *acb = opaque;
2521 acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
2522 acb->req.req, acb->req.buf);
2523 bdrv_co_complete(acb);
2526 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
2527 unsigned long int req, void *buf,
2528 BlockCompletionFunc *cb, void *opaque)
2530 BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
2534 acb->need_bh = true;
2535 acb->req.error = -EINPROGRESS;
2538 co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
2539 qemu_coroutine_enter(co, acb);
2541 bdrv_co_maybe_schedule_bh(acb);
2542 return &acb->common;
2545 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2547 return qemu_memalign(bdrv_opt_mem_align(bs), size);
2550 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2552 return memset(qemu_blockalign(bs, size), 0, size);
2555 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2557 size_t align = bdrv_opt_mem_align(bs);
2559 /* Ensure that NULL is never returned on success */
2565 return qemu_try_memalign(align, size);
2568 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2570 void *mem = qemu_try_blockalign(bs, size);
2573 memset(mem, 0, size);
/**
 * Check if all memory in this vector is sector aligned.
 */
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
{
    int i;
    size_t alignment = bdrv_min_mem_align(bs);

    for (i = 0; i < qiov->niov; i++) {
        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
            return false;
        }
        if (qiov->iov[i].iov_len % alignment) {
            return false;
        }
    }

    return true;
}
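/* Register a notifier that runs before every tracked write request on @bs and
 * may fail it by returning an error; backup-style copy-before-write logic is
 * the typical user. */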
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier)
{
    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
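/* I/O plugging batches request submission: while a node is plugged, drivers
 * that support it may queue requests internally and submit them in one batch
 * when the matching unplug arrives. */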
2605 void bdrv_io_plug(BlockDriverState *bs)
2609 QLIST_FOREACH(child, &bs->children, next) {
2610 bdrv_io_plug(child->bs);
2613 if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
2614 BlockDriver *drv = bs->drv;
2615 if (drv && drv->bdrv_io_plug) {
2616 drv->bdrv_io_plug(bs);
2621 void bdrv_io_unplug(BlockDriverState *bs)
2625 assert(bs->io_plugged);
2626 if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
2627 BlockDriver *drv = bs->drv;
2628 if (drv && drv->bdrv_io_unplug) {
2629 drv->bdrv_io_unplug(bs);
2633 QLIST_FOREACH(child, &bs->children, next) {
2634 bdrv_io_unplug(child->bs);
2638 void bdrv_io_unplugged_begin(BlockDriverState *bs)
2642 if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
2643 BlockDriver *drv = bs->drv;
2644 if (drv && drv->bdrv_io_unplug) {
2645 drv->bdrv_io_unplug(bs);
2649 QLIST_FOREACH(child, &bs->children, next) {
2650 bdrv_io_unplugged_begin(child->bs);
2654 void bdrv_io_unplugged_end(BlockDriverState *bs)
2658 assert(bs->io_plug_disabled);
2659 QLIST_FOREACH(child, &bs->children, next) {
2660 bdrv_io_unplugged_end(child->bs);
2663 if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
2664 BlockDriver *drv = bs->drv;
2665 if (drv && drv->bdrv_io_plug) {
2666 drv->bdrv_io_plug(bs);