2 * Block layer I/O functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "sysemu/block-backend.h"
27 #include "block/blockjob.h"
28 #include "block/block_int.h"
29 #include "block/throttle-groups.h"
30 #include "qemu/error-report.h"
32 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
34 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
35 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
36 BlockCompletionFunc *cb, void *opaque);
37 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
38 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
39 BlockCompletionFunc *cb, void *opaque);
40 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
41 int64_t sector_num, int nb_sectors,
43 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
44 int64_t sector_num, int nb_sectors,
46 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
47 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
48 BdrvRequestFlags flags);
49 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
50 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
51 BdrvRequestFlags flags);
52 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
56 BdrvRequestFlags flags,
57 BlockCompletionFunc *cb,
60 static void coroutine_fn bdrv_co_do_rw(void *opaque);
61 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
62 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
64 /* throttling disk I/O limits */
65 void bdrv_set_io_limits(BlockDriverState *bs,
70 throttle_group_config(bs, cfg);
72 for (i = 0; i < 2; i++) {
73 qemu_co_enter_next(&bs->throttled_reqs[i]);
77 /* this function drains all the throttled I/Os */
78 static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
81 bool enabled = bs->io_limits_enabled;
84 bs->io_limits_enabled = false;
86 for (i = 0; i < 2; i++) {
87 while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
92 bs->io_limits_enabled = enabled;
97 void bdrv_io_limits_disable(BlockDriverState *bs)
99 bs->io_limits_enabled = false;
100 bdrv_start_throttled_reqs(bs);
101 throttle_group_unregister_bs(bs);
104 /* should be called before bdrv_set_io_limits if a limit is set */
105 void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
107 assert(!bs->io_limits_enabled);
108 throttle_group_register_bs(bs, group);
109 bs->io_limits_enabled = true;
112 void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
114 /* this bs is not part of any group */
115 if (!bs->throttle_state) {
119 /* this bs is part of the same group as the one we want */
120 if (!g_strcmp0(throttle_group_get_name(bs), group)) {
124 /* need to change the group this bs belongs to */
125 bdrv_io_limits_disable(bs);
126 bdrv_io_limits_enable(bs, group);
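/* Illustrative sketch (not part of the original file): a caller wiring a
 * BlockDriverState into a throttle group typically enables the limits first
 * and then applies a configuration; the group name "fast" and the contents of
 * cfg are hypothetical here:
 *
 *     ThrottleConfig cfg;
 *     ... fill in cfg ...
 *     if (!bs->throttle_state) {
 *         bdrv_io_limits_enable(bs, "fast");        // join group "fast"
 *     } else {
 *         bdrv_io_limits_update_group(bs, "fast");  // or switch to it
 *     }
 *     bdrv_set_io_limits(bs, &cfg);                 // apply the limits
 */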
129 void bdrv_setup_io_funcs(BlockDriver *bdrv)
131 /* Block drivers without coroutine functions need emulation */
132 if (!bdrv->bdrv_co_readv) {
133 bdrv->bdrv_co_readv = bdrv_co_readv_em;
134 bdrv->bdrv_co_writev = bdrv_co_writev_em;
136 /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
137 * the block driver lacks aio we need to emulate that too.
139 if (!bdrv->bdrv_aio_readv) {
140 /* add AIO emulation layer */
141 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
142 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
147 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
149 BlockDriver *drv = bs->drv;
150 Error *local_err = NULL;
152 memset(&bs->bl, 0, sizeof(bs->bl));
158 /* Take some limits from the children as a default */
160 bdrv_refresh_limits(bs->file->bs, &local_err);
162 error_propagate(errp, local_err);
165 bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
166 bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
167 bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
168 bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
170 bs->bl.min_mem_alignment = 512;
171 bs->bl.opt_mem_alignment = getpagesize();
175 bdrv_refresh_limits(bs->backing->bs, &local_err);
177 error_propagate(errp, local_err);
180 bs->bl.opt_transfer_length =
181 MAX(bs->bl.opt_transfer_length,
182 bs->backing->bs->bl.opt_transfer_length);
183 bs->bl.max_transfer_length =
184 MIN_NON_ZERO(bs->bl.max_transfer_length,
185 bs->backing->bs->bl.max_transfer_length);
186 bs->bl.opt_mem_alignment =
187 MAX(bs->bl.opt_mem_alignment,
188 bs->backing->bs->bl.opt_mem_alignment);
189 bs->bl.min_mem_alignment =
190 MAX(bs->bl.min_mem_alignment,
191 bs->backing->bs->bl.min_mem_alignment);
194 /* Then let the driver override it */
195 if (drv->bdrv_refresh_limits) {
196 drv->bdrv_refresh_limits(bs, errp);
201 * The copy-on-read flag is actually a reference count so multiple users may
202 * use the feature without worrying about clobbering its previous state.
203 * Copy-on-read stays enabled until all users have called to disable it.
205 void bdrv_enable_copy_on_read(BlockDriverState *bs)
210 void bdrv_disable_copy_on_read(BlockDriverState *bs)
212 assert(bs->copy_on_read > 0);
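/* Illustrative sketch (not part of the original file): two independent users
 * can stack enable/disable calls without clobbering each other, since the
 * flag is a reference count:
 *
 *     bdrv_enable_copy_on_read(bs);    // user A: copy_on_read == 1
 *     bdrv_enable_copy_on_read(bs);    // user B: copy_on_read == 2
 *     bdrv_disable_copy_on_read(bs);   // user A done: still enabled
 *     bdrv_disable_copy_on_read(bs);   // user B done: disabled again
 */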
216 /* Check if any requests are in-flight (including throttled requests) */
217 bool bdrv_requests_pending(BlockDriverState *bs)
221 if (!QLIST_EMPTY(&bs->tracked_requests)) {
224 if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
227 if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
231 QLIST_FOREACH(child, &bs->children, next) {
232 if (bdrv_requests_pending(child->bs)) {
240 static void bdrv_drain_recurse(BlockDriverState *bs)
244 if (bs->drv && bs->drv->bdrv_drain) {
245 bs->drv->bdrv_drain(bs);
247 QLIST_FOREACH(child, &bs->children, next) {
248 bdrv_drain_recurse(child->bs);
253 * Wait for pending requests to complete on a single BlockDriverState subtree,
254 * and suspend the block driver's internal I/O until the next request arrives.
256 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState's AioContext.
259 * Only this BlockDriverState's AioContext is run, so in-flight requests must
260 * not depend on events in other AioContexts. In that case, use
261 * bdrv_drain_all() instead.
263 void bdrv_drain(BlockDriverState *bs)
267 bdrv_drain_recurse(bs);
270 bdrv_flush_io_queue(bs);
271 busy = bdrv_requests_pending(bs);
272 busy |= aio_poll(bdrv_get_aio_context(bs), busy);
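/* Illustrative sketch (not part of the original file): a caller that needs a
 * quiescent point on one device, for example before reconfiguring it, drains
 * while holding that device's AioContext:
 *
 *     AioContext *ctx = bdrv_get_aio_context(bs);
 *     aio_context_acquire(ctx);
 *     bdrv_drain(bs);                  // no tracked requests remain after this
 *     ... safely modify bs ...
 *     aio_context_release(ctx);
 */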
277 * Wait for pending requests to complete across all BlockDriverStates
279 * This function does not flush data to disk, use bdrv_flush_all() for that
280 * after calling this function.
282 void bdrv_drain_all(void)
284 /* Always run first iteration so any pending completion BHs run */
286 BlockDriverState *bs = NULL;
287 GSList *aio_ctxs = NULL, *ctx;
289 while ((bs = bdrv_next(bs))) {
290 AioContext *aio_context = bdrv_get_aio_context(bs);
292 aio_context_acquire(aio_context);
294 block_job_pause(bs->job);
296 aio_context_release(aio_context);
298 if (!g_slist_find(aio_ctxs, aio_context)) {
299 aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
303 /* Note that completion of an asynchronous I/O operation can trigger any
304 * number of other I/O operations on other devices---for example a
305 * coroutine can submit an I/O request to another device in response to
306 * request completion. Therefore we must keep looping until there was no
307 * more activity rather than simply draining each device independently.
312 for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
313 AioContext *aio_context = ctx->data;
316 aio_context_acquire(aio_context);
317 while ((bs = bdrv_next(bs))) {
318 if (aio_context == bdrv_get_aio_context(bs)) {
319 bdrv_flush_io_queue(bs);
320 if (bdrv_requests_pending(bs)) {
322 aio_poll(aio_context, busy);
326 busy |= aio_poll(aio_context, false);
327 aio_context_release(aio_context);
332 while ((bs = bdrv_next(bs))) {
333 AioContext *aio_context = bdrv_get_aio_context(bs);
335 aio_context_acquire(aio_context);
337 block_job_resume(bs->job);
339 aio_context_release(aio_context);
341 g_slist_free(aio_ctxs);
345 * Remove an active request from the tracked requests list
347 * This function should be called when a tracked request is completing.
349 static void tracked_request_end(BdrvTrackedRequest *req)
351 if (req->serialising) {
352 req->bs->serialising_in_flight--;
355 QLIST_REMOVE(req, list);
356 qemu_co_queue_restart_all(&req->wait_queue);
360 * Add an active request to the tracked requests list
362 static void tracked_request_begin(BdrvTrackedRequest *req,
363 BlockDriverState *bs,
366 enum BdrvTrackedRequestType type)
368 *req = (BdrvTrackedRequest){
373 .co = qemu_coroutine_self(),
374 .serialising = false,
375 .overlap_offset = offset,
376 .overlap_bytes = bytes,
379 qemu_co_queue_init(&req->wait_queue);
381 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
384 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
386 int64_t overlap_offset = req->offset & ~(align - 1);
387 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
390 if (!req->serialising) {
391 req->bs->serialising_in_flight++;
392 req->serialising = true;
395 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
396 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
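/* Worked example (illustrative, not part of the original file): with
 * req->offset = 4608, req->bytes = 1024 and align = 4096:
 *
 *     overlap_offset = 4608 & ~4095                = 4096
 *     overlap_bytes  = ROUND_UP(5632, 4096) - 4096 = 4096
 *
 * so the request serialises against anything touching [4096, 8192).
 */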
400 * Round a region to cluster boundaries
402 void bdrv_round_to_clusters(BlockDriverState *bs,
403 int64_t sector_num, int nb_sectors,
404 int64_t *cluster_sector_num,
405 int *cluster_nb_sectors)
409 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
410 *cluster_sector_num = sector_num;
411 *cluster_nb_sectors = nb_sectors;
413 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
414 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
415 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
420 static int bdrv_get_cluster_size(BlockDriverState *bs)
425 ret = bdrv_get_info(bs, &bdi);
426 if (ret < 0 || bdi.cluster_size == 0) {
427 return bs->request_alignment;
429 return bdi.cluster_size;
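/* Worked example (illustrative, not part of the original file): with a
 * 64 KiB cluster size, bdrv_round_to_clusters() above widens a request for
 * sectors [130, 135) to whole clusters:
 *
 *     c                   = 65536 / 512                       = 128
 *     *cluster_sector_num = QEMU_ALIGN_DOWN(130, 128)         = 128
 *     *cluster_nb_sectors = QEMU_ALIGN_UP(130 - 128 + 5, 128) = 128
 *
 * i.e. the copy-on-read path operates on the whole cluster [128, 256).
 */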
433 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
434 int64_t offset, unsigned int bytes)
437 if (offset >= req->overlap_offset + req->overlap_bytes) {
441 if (req->overlap_offset >= offset + bytes) {
447 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
449 BlockDriverState *bs = self->bs;
450 BdrvTrackedRequest *req;
454 if (!bs->serialising_in_flight) {
460 QLIST_FOREACH(req, &bs->tracked_requests, list) {
461 if (req == self || (!req->serialising && !self->serialising)) {
464 if (tracked_request_overlaps(req, self->overlap_offset,
465 self->overlap_bytes))
467 /* Hitting this means there was a reentrant request, for
468 * example, a block driver issuing nested requests. This must
469 * never happen since it means deadlock.
471 assert(qemu_coroutine_self() != req->co);
473 /* If the request is already (indirectly) waiting for us, or
474 * will wait for us as soon as it wakes up, then just go on
475 * (instead of producing a deadlock in the former case). */
476 if (!req->waiting_for) {
477 self->waiting_for = req;
478 qemu_co_queue_wait(&req->wait_queue);
479 self->waiting_for = NULL;
491 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
494 if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
498 if (!bdrv_is_inserted(bs)) {
509 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
512 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
516 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
517 nb_sectors * BDRV_SECTOR_SIZE);
520 typedef struct RwCo {
521 BlockDriverState *bs;
526 BdrvRequestFlags flags;
529 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
533 if (!rwco->is_write) {
534 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
535 rwco->qiov->size, rwco->qiov,
538 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
539 rwco->qiov->size, rwco->qiov,
545 * Process a vectored synchronous request using coroutines
547 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
548 QEMUIOVector *qiov, bool is_write,
549 BdrvRequestFlags flags)
556 .is_write = is_write,
562 * In sync call context, when the vcpu is blocked, this throttling timer
563 * will not fire; so the I/O throttling function has to be disabled here
564 * if it has been enabled.
566 if (bs->io_limits_enabled) {
567 fprintf(stderr, "Disabling I/O throttling on '%s' due "
568 "to synchronous I/O.\n", bdrv_get_device_name(bs));
569 bdrv_io_limits_disable(bs);
572 if (qemu_in_coroutine()) {
573 /* Fast-path if already in coroutine context */
574 bdrv_rw_co_entry(&rwco);
576 AioContext *aio_context = bdrv_get_aio_context(bs);
578 co = qemu_coroutine_create(bdrv_rw_co_entry);
579 qemu_coroutine_enter(co, &rwco);
580 while (rwco.ret == NOT_DONE) {
581 aio_poll(aio_context, true);
588 * Process a synchronous request using coroutines
590 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
591 int nb_sectors, bool is_write, BdrvRequestFlags flags)
595 .iov_base = (void *)buf,
596 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
599 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
603 qemu_iovec_init_external(&qiov, &iov, 1);
604 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
605 &qiov, is_write, flags);
608 /* return < 0 if error. See bdrv_write() for the return codes */
609 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
610 uint8_t *buf, int nb_sectors)
612 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
615 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
616 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
617 uint8_t *buf, int nb_sectors)
622 enabled = bs->io_limits_enabled;
623 bs->io_limits_enabled = false;
624 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
625 bs->io_limits_enabled = enabled;
629 /* Return < 0 if error. Important errors are:
630 -EIO generic I/O error (may happen for all errors)
631 -ENOMEDIUM No media inserted.
632 -EINVAL Invalid sector number or nb_sectors
633 -EACCES Trying to write a read-only device
635 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
636 const uint8_t *buf, int nb_sectors)
638 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
641 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
642 int nb_sectors, BdrvRequestFlags flags)
644 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
645 BDRV_REQ_ZERO_WRITE | flags);
649 * Completely zero out a block device with the help of bdrv_write_zeroes.
650 * The operation is sped up by checking the block status and only writing
651 * zeroes to the device if they currently do not return zeroes. Optional
652 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
654 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
656 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
658 int64_t target_sectors, ret, nb_sectors, sector_num = 0;
661 target_sectors = bdrv_nb_sectors(bs);
662 if (target_sectors < 0) {
663 return target_sectors;
667 nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
668 if (nb_sectors <= 0) {
671 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
673 error_report("error getting block status at sector %" PRId64 ": %s",
674 sector_num, strerror(-ret));
677 if (ret & BDRV_BLOCK_ZERO) {
681 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
683 error_report("error writing zeroes at sector %" PRId64 ": %s",
684 sector_num, strerror(-ret));
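/* Illustrative sketch (not part of the original file): zeroing a whole device
 * while allowing the driver to unmap blocks where it can:
 *
 *     int ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("failed to zero device: %s", strerror(-ret));
 *     }
 */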
691 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
695 .iov_base = (void *)buf,
704 qemu_iovec_init_external(&qiov, &iov, 1);
705 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
713 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
717 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
725 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
726 const void *buf, int bytes)
730 .iov_base = (void *) buf,
738 qemu_iovec_init_external(&qiov, &iov, 1);
739 return bdrv_pwritev(bs, offset, &qiov);
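/* Illustrative sketch (not part of the original file): the byte-based helpers
 * above suit small synchronous metadata accesses; the buffer layout and
 * offsets here are hypothetical:
 *
 *     uint8_t header[512];
 *     if (bdrv_pread(bs, 0, header, sizeof(header)) < 0) {
 *         return -EIO;
 *     }
 *     header[0] |= 0x01;                        // tweak some header flag
 *     if (bdrv_pwrite(bs, 0, header, sizeof(header)) < 0) {
 *         return -EIO;
 *     }
 */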
743 * Writes to the file and ensures that no writes are reordered across this
744 * request (acts as a barrier)
746 * Returns 0 on success, -errno in error cases.
748 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
749 const void *buf, int count)
753 ret = bdrv_pwrite(bs, offset, buf, count);
758 /* No flush needed for cache modes that already do it */
759 if (bs->enable_write_cache) {
766 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
767 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
769 /* Perform I/O through a temporary buffer so that users who scribble over
770 * their read buffer while the operation is in progress do not end up
771 * modifying the image file. This is critical for zero-copy guest I/O
772 * where anything might happen inside guest memory.
776 BlockDriver *drv = bs->drv;
778 QEMUIOVector bounce_qiov;
779 int64_t cluster_sector_num;
780 int cluster_nb_sectors;
784 /* Cover entire cluster so no additional backing file I/O is required when
785 * allocating a cluster in the image file.
787 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
788 &cluster_sector_num, &cluster_nb_sectors);
790 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
791 cluster_sector_num, cluster_nb_sectors);
793 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
794 iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
795 if (bounce_buffer == NULL) {
800 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
802 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
808 if (drv->bdrv_co_write_zeroes &&
809 buffer_is_zero(bounce_buffer, iov.iov_len)) {
810 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
811 cluster_nb_sectors, 0);
813 /* This does not change the data on the disk, it is not necessary
814 * to flush even in cache=writethrough mode.
816 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
821 /* It might be okay to ignore write errors for guest requests. If this
822 * is a deliberate copy-on-read then we don't want to ignore the error.
823 * Simply report it in all cases.
828 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
829 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
830 nb_sectors * BDRV_SECTOR_SIZE);
833 qemu_vfree(bounce_buffer);
838 * Forwards an already correctly aligned request to the BlockDriver. This
839 * handles copy on read and zeroing after EOF; any other features must be
840 * implemented by the caller.
842 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
843 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
844 int64_t align, QEMUIOVector *qiov, int flags)
846 BlockDriver *drv = bs->drv;
849 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
850 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
852 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
853 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
854 assert(!qiov || bytes == qiov->size);
856 /* Handle Copy on Read and associated serialisation */
857 if (flags & BDRV_REQ_COPY_ON_READ) {
858 /* If we touch the same cluster it counts as an overlap. This
859 * guarantees that allocating writes will be serialized and not race
860 * with each other for the same cluster. For example, in copy-on-read
861 * it ensures that the CoR read and write operations are atomic and
862 * guest writes cannot interleave between them. */
863 mark_request_serialising(req, bdrv_get_cluster_size(bs));
866 wait_serialising_requests(req);
868 if (flags & BDRV_REQ_COPY_ON_READ) {
871 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
876 if (!ret || pnum != nb_sectors) {
877 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
882 /* Forward the request to the BlockDriver */
883 if (!bs->zero_beyond_eof) {
884 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
886 /* Read zeros after EOF */
887 int64_t total_sectors, max_nb_sectors;
889 total_sectors = bdrv_nb_sectors(bs);
890 if (total_sectors < 0) {
895 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
896 align >> BDRV_SECTOR_BITS);
897 if (nb_sectors < max_nb_sectors) {
898 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
899 } else if (max_nb_sectors > 0) {
900 QEMUIOVector local_qiov;
902 qemu_iovec_init(&local_qiov, qiov->niov);
903 qemu_iovec_concat(&local_qiov, qiov, 0,
904 max_nb_sectors * BDRV_SECTOR_SIZE);
906 ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
909 qemu_iovec_destroy(&local_qiov);
914 /* Reading beyond end of file is supposed to produce zeroes */
915 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
916 uint64_t offset = MAX(0, total_sectors - sector_num);
917 uint64_t bytes = (sector_num + nb_sectors - offset) *
919 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
928 * Handle a read request in coroutine context
930 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
931 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
932 BdrvRequestFlags flags)
934 BlockDriver *drv = bs->drv;
935 BdrvTrackedRequest req;
937 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
938 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
939 uint8_t *head_buf = NULL;
940 uint8_t *tail_buf = NULL;
941 QEMUIOVector local_qiov;
942 bool use_local_qiov = false;
949 ret = bdrv_check_byte_request(bs, offset, bytes);
954 /* Don't do copy-on-read if we read data before a write operation */
955 if (bs->copy_on_read && !(flags & BDRV_REQ_NO_COPY_ON_READ)) {
956 flags |= BDRV_REQ_COPY_ON_READ;
959 /* throttling disk I/O */
960 if (bs->io_limits_enabled) {
961 throttle_group_co_io_limits_intercept(bs, bytes, false);
964 /* Align read if necessary by padding qiov */
965 if (offset & (align - 1)) {
966 head_buf = qemu_blockalign(bs, align);
967 qemu_iovec_init(&local_qiov, qiov->niov + 2);
968 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
969 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
970 use_local_qiov = true;
972 bytes += offset & (align - 1);
973 offset = offset & ~(align - 1);
976 if ((offset + bytes) & (align - 1)) {
977 if (!use_local_qiov) {
978 qemu_iovec_init(&local_qiov, qiov->niov + 1);
979 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
980 use_local_qiov = true;
982 tail_buf = qemu_blockalign(bs, align);
983 qemu_iovec_add(&local_qiov, tail_buf,
984 align - ((offset + bytes) & (align - 1)));
986 bytes = ROUND_UP(bytes, align);
989 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
990 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
991 use_local_qiov ? &local_qiov : qiov,
993 tracked_request_end(&req);
995 if (use_local_qiov) {
996 qemu_iovec_destroy(&local_qiov);
997 qemu_vfree(head_buf);
998 qemu_vfree(tail_buf);
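/* Worked example (illustrative, not part of the original file): with
 * align = 512 and an unaligned request offset = 1000, bytes = 100, the
 * padding above expands the request to one aligned read:
 *
 *     head padding = 1000 & 511              = 488
 *     offset       = 1000 & ~511             = 512
 *     bytes        = 100 + 488               = 588
 *     tail padding = 512 - (1100 & 511)      = 436
 *     bytes        = ROUND_UP(588, 512)      = 1024
 *
 * so the driver sees a single read of [512, 1536) and only the middle
 * 100 bytes land in the guest's qiov.
 */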
1004 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
1005 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1006 BdrvRequestFlags flags)
1008 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1012 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
1013 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1016 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
1017 int nb_sectors, QEMUIOVector *qiov)
1019 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
1021 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
1024 int coroutine_fn bdrv_co_no_copy_on_readv(BlockDriverState *bs,
1025 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1027 trace_bdrv_co_no_copy_on_readv(bs, sector_num, nb_sectors);
1029 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1030 BDRV_REQ_NO_COPY_ON_READ);
1033 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
1034 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1036 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
1038 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1039 BDRV_REQ_COPY_ON_READ);
1042 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
1044 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
1045 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
1047 BlockDriver *drv = bs->drv;
1049 struct iovec iov = {0};
1052 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
1053 BDRV_REQUEST_MAX_SECTORS);
1055 while (nb_sectors > 0 && !ret) {
1056 int num = nb_sectors;
1058 /* Align request. Block drivers can expect the "bulk" of the request
1061 if (bs->bl.write_zeroes_alignment
1062 && num > bs->bl.write_zeroes_alignment) {
1063 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
1064 /* Make a small request up to the first aligned sector. */
1065 num = bs->bl.write_zeroes_alignment;
1066 num -= sector_num % bs->bl.write_zeroes_alignment;
1067 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
1068 /* Shorten the request to the last aligned sector. num cannot
1069 * underflow because num > bs->bl.write_zeroes_alignment.
1071 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
1075 /* limit request size */
1076 if (num > max_write_zeroes) {
1077 num = max_write_zeroes;
1081 /* First try the efficient write zeroes operation */
1082 if (drv->bdrv_co_write_zeroes) {
1083 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
1086 if (ret == -ENOTSUP) {
1087 /* Fall back to bounce buffer if write zeroes is unsupported */
1088 int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
1089 MAX_WRITE_ZEROES_BOUNCE_BUFFER);
1090 num = MIN(num, max_xfer_len);
1091 iov.iov_len = num * BDRV_SECTOR_SIZE;
1092 if (iov.iov_base == NULL) {
1093 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
1094 if (iov.iov_base == NULL) {
1098 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
1100 qemu_iovec_init_external(&qiov, &iov, 1);
1102 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
1104 /* Keep bounce buffer around if it is big enough for all
1105 * future requests.
1107 if (num < max_xfer_len) {
1108 qemu_vfree(iov.iov_base);
1109 iov.iov_base = NULL;
1118 qemu_vfree(iov.iov_base);
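/* Worked example (illustrative, not part of the original file): with
 * bs->bl.write_zeroes_alignment = 128 and a request for sectors [100, 400),
 * and assuming max_write_zeroes does not limit it further, the loop above
 * issues three driver calls:
 *
 *     [100, 128)  small head up to the first aligned sector  (num = 28)
 *     [128, 384)  aligned bulk                               (num = 256)
 *     [384, 400)  unaligned tail                             (num = 16)
 */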
1123 * Forwards an already correctly aligned write request to the BlockDriver.
1125 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
1126 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1127 QEMUIOVector *qiov, int flags)
1129 BlockDriver *drv = bs->drv;
1133 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
1134 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
1136 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1137 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1138 assert(!qiov || bytes == qiov->size);
1140 waited = wait_serialising_requests(req);
1141 assert(!waited || !req->serialising);
1142 assert(req->overlap_offset <= offset);
1143 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1145 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1147 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1148 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
1149 qemu_iovec_is_zero(qiov)) {
1150 flags |= BDRV_REQ_ZERO_WRITE;
1151 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1152 flags |= BDRV_REQ_MAY_UNMAP;
1157 /* Do nothing, write notifier decided to fail this request */
1158 } else if (flags & BDRV_REQ_ZERO_WRITE) {
1159 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1160 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
1162 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1163 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
1165 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1167 if (ret == 0 && !bs->enable_write_cache) {
1168 ret = bdrv_co_flush(bs);
1171 bdrv_set_dirty(bs, sector_num, nb_sectors);
1173 if (bs->wr_highest_offset < offset + bytes) {
1174 bs->wr_highest_offset = offset + bytes;
1178 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
1184 static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
1187 BdrvRequestFlags flags,
1188 BdrvTrackedRequest *req)
1190 uint8_t *buf = NULL;
1191 QEMUIOVector local_qiov;
1193 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1194 unsigned int head_padding_bytes, tail_padding_bytes;
1197 head_padding_bytes = offset & (align - 1);
1198 tail_padding_bytes = align - ((offset + bytes) & (align - 1));
1201 assert(flags & BDRV_REQ_ZERO_WRITE);
1202 if (head_padding_bytes || tail_padding_bytes) {
1203 buf = qemu_blockalign(bs, align);
1204 iov = (struct iovec) {
1208 qemu_iovec_init_external(&local_qiov, &iov, 1);
1210 if (head_padding_bytes) {
1211 uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1213 /* RMW the unaligned part before head. */
1214 mark_request_serialising(req, align);
1215 wait_serialising_requests(req);
1216 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1217 ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
1218 align, &local_qiov, 0);
1222 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1224 memset(buf + head_padding_bytes, 0, zero_bytes);
1225 ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
1227 flags & ~BDRV_REQ_ZERO_WRITE);
1231 offset += zero_bytes;
1232 bytes -= zero_bytes;
1235 assert(!bytes || (offset & (align - 1)) == 0);
1236 if (bytes >= align) {
1237 /* Write the aligned part in the middle. */
1238 uint64_t aligned_bytes = bytes & ~(align - 1);
1239 ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
1244 bytes -= aligned_bytes;
1245 offset += aligned_bytes;
1248 assert(!bytes || (offset & (align - 1)) == 0);
1250 assert(align == tail_padding_bytes + bytes);
1251 /* RMW the unaligned part after tail. */
1252 mark_request_serialising(req, align);
1253 wait_serialising_requests(req);
1254 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1255 ret = bdrv_aligned_preadv(bs, req, offset, align,
1256 align, &local_qiov, 0);
1260 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1262 memset(buf, 0, bytes);
1263 ret = bdrv_aligned_pwritev(bs, req, offset, align,
1264 &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1273 * Handle a write request in coroutine context
1275 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
1276 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1277 BdrvRequestFlags flags)
1279 BdrvTrackedRequest req;
1280 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
1281 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1282 uint8_t *head_buf = NULL;
1283 uint8_t *tail_buf = NULL;
1284 QEMUIOVector local_qiov;
1285 bool use_local_qiov = false;
1291 if (bs->read_only) {
1295 ret = bdrv_check_byte_request(bs, offset, bytes);
1300 /* throttling disk I/O */
1301 if (bs->io_limits_enabled) {
1302 throttle_group_co_io_limits_intercept(bs, bytes, true);
1306 * Align write if necessary by performing a read-modify-write cycle.
1307 * Pad qiov with the read parts and be sure to have a tracked request not
1308 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1310 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1313 ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
1317 if (offset & (align - 1)) {
1318 QEMUIOVector head_qiov;
1319 struct iovec head_iov;
1321 mark_request_serialising(&req, align);
1322 wait_serialising_requests(&req);
1324 head_buf = qemu_blockalign(bs, align);
1325 head_iov = (struct iovec) {
1326 .iov_base = head_buf,
1329 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1331 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1332 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
1333 align, &head_qiov, 0);
1337 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1339 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1340 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1341 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1342 use_local_qiov = true;
1344 bytes += offset & (align - 1);
1345 offset = offset & ~(align - 1);
1348 if ((offset + bytes) & (align - 1)) {
1349 QEMUIOVector tail_qiov;
1350 struct iovec tail_iov;
1354 mark_request_serialising(&req, align);
1355 waited = wait_serialising_requests(&req);
1356 assert(!waited || !use_local_qiov);
1358 tail_buf = qemu_blockalign(bs, align);
1359 tail_iov = (struct iovec) {
1360 .iov_base = tail_buf,
1363 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1365 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1366 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
1367 align, &tail_qiov, 0);
1371 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1373 if (!use_local_qiov) {
1374 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1375 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1376 use_local_qiov = true;
1379 tail_bytes = (offset + bytes) & (align - 1);
1380 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1382 bytes = ROUND_UP(bytes, align);
1385 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
1386 use_local_qiov ? &local_qiov : qiov,
1391 if (use_local_qiov) {
1392 qemu_iovec_destroy(&local_qiov);
1394 qemu_vfree(head_buf);
1395 qemu_vfree(tail_buf);
1397 tracked_request_end(&req);
1401 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
1402 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1403 BdrvRequestFlags flags)
1405 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1409 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
1410 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1413 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
1414 int nb_sectors, QEMUIOVector *qiov)
1416 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
1418 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
1421 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
1422 int64_t sector_num, int nb_sectors,
1423 BdrvRequestFlags flags)
1425 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
1427 if (!(bs->open_flags & BDRV_O_UNMAP)) {
1428 flags &= ~BDRV_REQ_MAY_UNMAP;
1431 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
1432 BDRV_REQ_ZERO_WRITE | flags);
1435 int bdrv_flush_all(void)
1437 BlockDriverState *bs = NULL;
1440 while ((bs = bdrv_next(bs))) {
1441 AioContext *aio_context = bdrv_get_aio_context(bs);
1444 aio_context_acquire(aio_context);
1445 ret = bdrv_flush(bs);
1446 if (ret < 0 && !result) {
1449 aio_context_release(aio_context);
1455 typedef struct BdrvCoGetBlockStatusData {
1456 BlockDriverState *bs;
1457 BlockDriverState *base;
1463 } BdrvCoGetBlockStatusData;
1466 * Returns the allocation status of the specified sectors.
1467 * Drivers not implementing the functionality are assumed to not support
1468 * backing files, hence all their sectors are reported as allocated.
1470 * If 'sector_num' is beyond the end of the disk image the return value is 0
1471 * and 'pnum' is set to 0.
1473 * 'pnum' is set to the number of sectors (including and immediately following
1474 * the specified sector) that are known to be in the same
1475 * allocated/unallocated state.
1477 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
1478 * beyond the end of the disk image it will be clamped.
1480 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
1482 int nb_sectors, int *pnum)
1484 int64_t total_sectors;
1488 total_sectors = bdrv_nb_sectors(bs);
1489 if (total_sectors < 0) {
1490 return total_sectors;
1493 if (sector_num >= total_sectors) {
1498 n = total_sectors - sector_num;
1499 if (n < nb_sectors) {
1503 if (!bs->drv->bdrv_co_get_block_status) {
1505 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1506 if (bs->drv->protocol_name) {
1507 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
1512 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum);
1518 if (ret & BDRV_BLOCK_RAW) {
1519 assert(ret & BDRV_BLOCK_OFFSET_VALID);
1520 return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
1524 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
1525 ret |= BDRV_BLOCK_ALLOCATED;
1527 if (bdrv_unallocated_blocks_are_zero(bs)) {
1528 ret |= BDRV_BLOCK_ZERO;
1529 } else if (bs->backing) {
1530 BlockDriverState *bs2 = bs->backing->bs;
1531 int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
1532 if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
1533 ret |= BDRV_BLOCK_ZERO;
1539 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
1540 (ret & BDRV_BLOCK_OFFSET_VALID)) {
1543 ret2 = bdrv_co_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
1546 /* Ignore errors. This is just providing extra information, it
1547 * is useful but not necessary.
1550 /* !file_pnum indicates an offset at or beyond the EOF; it is
1551 * perfectly valid for the format block driver to point to such
1552 * offsets, so catch it and mark everything as zero */
1553 ret |= BDRV_BLOCK_ZERO;
1555 /* Limit request to the range reported by the protocol driver */
1557 ret |= (ret2 & BDRV_BLOCK_ZERO);
1565 static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1566 BlockDriverState *base,
1571 BlockDriverState *p;
1575 for (p = bs; p != base; p = backing_bs(p)) {
1576 ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum);
1577 if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
1580 /* [sector_num, pnum] unallocated on this layer, which could be only
1581 * the first part of [sector_num, nb_sectors]. */
1582 nb_sectors = MIN(nb_sectors, *pnum);
1587 /* Coroutine wrapper for bdrv_get_block_status_above() */
1588 static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
1590 BdrvCoGetBlockStatusData *data = opaque;
1592 data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1600 * Synchronous wrapper around bdrv_co_get_block_status_above().
1602 * See bdrv_co_get_block_status_above() for details.
1604 int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1605 BlockDriverState *base,
1607 int nb_sectors, int *pnum)
1610 BdrvCoGetBlockStatusData data = {
1613 .sector_num = sector_num,
1614 .nb_sectors = nb_sectors,
1619 if (qemu_in_coroutine()) {
1620 /* Fast-path if already in coroutine context */
1621 bdrv_get_block_status_above_co_entry(&data);
1623 AioContext *aio_context = bdrv_get_aio_context(bs);
1625 co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
1626 qemu_coroutine_enter(co, &data);
1627 while (!data.done) {
1628 aio_poll(aio_context, true);
1634 int64_t bdrv_get_block_status(BlockDriverState *bs,
1636 int nb_sectors, int *pnum)
1638 return bdrv_get_block_status_above(bs, backing_bs(bs),
1639 sector_num, nb_sectors, pnum);
1642 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
1643 int nb_sectors, int *pnum)
1645 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum);
1649 return !!(ret & BDRV_BLOCK_ALLOCATED);
1653 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1655 * Return true if the given sector is allocated in any image between
1656 * BASE and TOP (inclusive). BASE can be NULL to check if the given
1657 * sector is allocated in any image of the chain. Return false otherwise.
1659 * 'pnum' is set to the number of sectors (including and immediately following
1660 * the specified sector) that are known to be in the same
1661 * allocated/unallocated state.
1664 int bdrv_is_allocated_above(BlockDriverState *top,
1665 BlockDriverState *base,
1667 int nb_sectors, int *pnum)
1669 BlockDriverState *intermediate;
1670 int ret, n = nb_sectors;
1673 while (intermediate && intermediate != base) {
1675 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
1685 * [sector_num, nb_sectors] is unallocated on top but intermediate might have
1688 * [sector_num+x, nr_sectors] allocated.
1690 if (n > pnum_inter &&
1691 (intermediate == top ||
1692 sector_num + pnum_inter < intermediate->total_sectors)) {
1696 intermediate = backing_bs(intermediate);
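/* Illustrative sketch (not part of the original file): with a chain
 * base <- snap <- top, a commit-style user can ask whether a range in the
 * overlay layers still shadows the base image:
 *
 *     if (bdrv_is_allocated_above(top, base, sector_num, nb_sectors, &pnum)) {
 *         ... the first pnum sectors must be copied down into base ...
 *     } else {
 *         ... the first pnum sectors are already visible from base ...
 *     }
 */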
1703 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
1704 const uint8_t *buf, int nb_sectors)
1706 BlockDriver *drv = bs->drv;
1712 if (!drv->bdrv_write_compressed) {
1715 ret = bdrv_check_request(bs, sector_num, nb_sectors);
1720 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
1722 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
1725 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
1726 int64_t pos, int size)
1729 struct iovec iov = {
1730 .iov_base = (void *) buf,
1734 qemu_iovec_init_external(&qiov, &iov, 1);
1735 return bdrv_writev_vmstate(bs, &qiov, pos);
1738 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
1740 BlockDriver *drv = bs->drv;
1744 } else if (drv->bdrv_save_vmstate) {
1745 return drv->bdrv_save_vmstate(bs, qiov, pos);
1746 } else if (bs->file) {
1747 return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
1753 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1754 int64_t pos, int size)
1756 BlockDriver *drv = bs->drv;
1759 if (drv->bdrv_load_vmstate)
1760 return drv->bdrv_load_vmstate(bs, buf, pos, size);
1762 return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
1766 /**************************************************************/
1769 BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
1770 QEMUIOVector *qiov, int nb_sectors,
1771 BlockCompletionFunc *cb, void *opaque)
1773 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
1775 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1779 BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
1780 QEMUIOVector *qiov, int nb_sectors,
1781 BlockCompletionFunc *cb, void *opaque)
1783 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
1785 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1789 BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
1790 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
1791 BlockCompletionFunc *cb, void *opaque)
1793 trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
1795 return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
1796 BDRV_REQ_ZERO_WRITE | flags,
1801 typedef struct MultiwriteCB {
1806 BlockCompletionFunc *cb;
1808 QEMUIOVector *free_qiov;
1812 static void multiwrite_user_cb(MultiwriteCB *mcb)
1816 for (i = 0; i < mcb->num_callbacks; i++) {
1817 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
1818 if (mcb->callbacks[i].free_qiov) {
1819 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
1821 g_free(mcb->callbacks[i].free_qiov);
1825 static void multiwrite_cb(void *opaque, int ret)
1827 MultiwriteCB *mcb = opaque;
1829 trace_multiwrite_cb(mcb, ret);
1831 if (ret < 0 && !mcb->error) {
1835 mcb->num_requests--;
1836 if (mcb->num_requests == 0) {
1837 multiwrite_user_cb(mcb);
1842 static int multiwrite_req_compare(const void *a, const void *b)
1844 const BlockRequest *req1 = a, *req2 = b;
1847 * Note that we can't simply subtract req2->sector from req1->sector
1848 * here as that could overflow the return value.
1850 if (req1->sector > req2->sector) {
1852 } else if (req1->sector < req2->sector) {
1860 * Takes a bunch of requests and tries to merge them. Returns the number of
1861 * requests that remain after merging.
1863 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
1864 int num_reqs, MultiwriteCB *mcb)
1868 // Sort requests by start sector
1869 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
1871 // Check if adjacent requests touch the same clusters. If so, combine them,
1872 // filling up gaps with zero sectors.
1874 for (i = 1; i < num_reqs; i++) {
1876 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
1878 // Handle exactly sequential writes and overlapping writes.
1879 if (reqs[i].sector <= oldreq_last) {
1883 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
1887 if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
1888 reqs[i].nb_sectors > bs->bl.max_transfer_length) {
1894 QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
1895 qemu_iovec_init(qiov,
1896 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
1898 // Add the first request to the merged one. If the requests are
1899 // overlapping, drop the last sectors of the first request.
1900 size = (reqs[i].sector - reqs[outidx].sector) << 9;
1901 qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
1903 // We shouldn't need to add any zeros between the two requests
1904 assert (reqs[i].sector <= oldreq_last);
1906 // Add the second request
1907 qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
1909 // Add tail of first request, if necessary
1910 if (qiov->size < reqs[outidx].qiov->size) {
1911 qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
1912 reqs[outidx].qiov->size - qiov->size);
1915 reqs[outidx].nb_sectors = qiov->size >> 9;
1916 reqs[outidx].qiov = qiov;
1918 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
1921 reqs[outidx].sector = reqs[i].sector;
1922 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
1923 reqs[outidx].qiov = reqs[i].qiov;
1928 block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
1929 num_reqs - outidx - 1);
1936 * Submit multiple AIO write requests at once.
1938 * On success, the function returns 0 and all requests in the reqs array have
1939 * been submitted. On error, this function returns -1, and any of the
1940 * requests may or may not be submitted yet. In particular, this means that the
1941 * callback will be called for some of the requests, for others it won't. The
1942 * caller must check the error field of the BlockRequest to wait for the right
1943 * callbacks (if error != 0, no callback will be called).
1945 * The implementation may modify the contents of the reqs array, e.g. to merge
1946 * requests. However, the fields opaque and error are left unmodified as they
1947 * are used to signal failure for a single request to the caller.
1949 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
1954 /* don't submit writes if we don't have a medium */
1955 if (bs->drv == NULL) {
1956 for (i = 0; i < num_reqs; i++) {
1957 reqs[i].error = -ENOMEDIUM;
1962 if (num_reqs == 0) {
1966 // Create MultiwriteCB structure
1967 mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
1968 mcb->num_requests = 0;
1969 mcb->num_callbacks = num_reqs;
1971 for (i = 0; i < num_reqs; i++) {
1972 mcb->callbacks[i].cb = reqs[i].cb;
1973 mcb->callbacks[i].opaque = reqs[i].opaque;
1976 // Check for mergeable requests
1977 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
1979 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
1981 /* Run the aio requests. */
1982 mcb->num_requests = num_reqs;
1983 for (i = 0; i < num_reqs; i++) {
1984 bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
1985 reqs[i].nb_sectors, reqs[i].flags,
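/* Illustrative sketch (not part of the original file): a device model can
 * batch several writes into one submission; callbacks, opaques and iovecs
 * here are hypothetical:
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0,  .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_write_cb, .opaque = op0 },
 *         { .sector = 16, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_write_cb, .opaque = op1 },
 *     };
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         ... check reqs[i].error; callbacks only fire where error == 0 ...
 *     }
 */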
1993 void bdrv_aio_cancel(BlockAIOCB *acb)
1996 bdrv_aio_cancel_async(acb);
1997 while (acb->refcnt > 1) {
1998 if (acb->aiocb_info->get_aio_context) {
1999 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2000 } else if (acb->bs) {
2001 aio_poll(bdrv_get_aio_context(acb->bs), true);
2006 qemu_aio_unref(acb);
2009 /* Async version of aio cancel. The caller is not blocked if the acb implements
2010 * cancel_async; otherwise we do nothing and let the request complete normally.
2011 * In either case the completion callback must be called. */
2012 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2014 if (acb->aiocb_info->cancel_async) {
2015 acb->aiocb_info->cancel_async(acb);
2019 /**************************************************************/
2020 /* async block device emulation */
2022 typedef struct BlockAIOCBSync {
2026 /* vector translation state */
2032 static const AIOCBInfo bdrv_em_aiocb_info = {
2033 .aiocb_size = sizeof(BlockAIOCBSync),
2036 static void bdrv_aio_bh_cb(void *opaque)
2038 BlockAIOCBSync *acb = opaque;
2040 if (!acb->is_write && acb->ret >= 0) {
2041 qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
2043 qemu_vfree(acb->bounce);
2044 acb->common.cb(acb->common.opaque, acb->ret);
2045 qemu_bh_delete(acb->bh);
2047 qemu_aio_unref(acb);
2050 static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
2054 BlockCompletionFunc *cb,
2059 BlockAIOCBSync *acb;
2061 acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
2062 acb->is_write = is_write;
2064 acb->bounce = qemu_try_blockalign(bs, qiov->size);
2065 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
2067 if (acb->bounce == NULL) {
2069 } else if (is_write) {
2070 qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
2071 acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
2073 acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
2076 qemu_bh_schedule(acb->bh);
2078 return &acb->common;
2081 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
2082 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2083 BlockCompletionFunc *cb, void *opaque)
2085 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
2088 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
2089 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2090 BlockCompletionFunc *cb, void *opaque)
2092 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
2096 typedef struct BlockAIOCBCoroutine {
2103 } BlockAIOCBCoroutine;
2105 static const AIOCBInfo bdrv_em_co_aiocb_info = {
2106 .aiocb_size = sizeof(BlockAIOCBCoroutine),
2109 static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
2111 if (!acb->need_bh) {
2112 acb->common.cb(acb->common.opaque, acb->req.error);
2113 qemu_aio_unref(acb);
2117 static void bdrv_co_em_bh(void *opaque)
2119 BlockAIOCBCoroutine *acb = opaque;
2121 assert(!acb->need_bh);
2122 qemu_bh_delete(acb->bh);
2123 bdrv_co_complete(acb);
2126 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
2128 acb->need_bh = false;
2129 if (acb->req.error != -EINPROGRESS) {
2130 BlockDriverState *bs = acb->common.bs;
2132 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
2133 qemu_bh_schedule(acb->bh);
2137 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
2138 static void coroutine_fn bdrv_co_do_rw(void *opaque)
2140 BlockAIOCBCoroutine *acb = opaque;
2141 BlockDriverState *bs = acb->common.bs;
2143 if (!acb->is_write) {
2144 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
2145 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
2147 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
2148 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
2151 bdrv_co_complete(acb);
2154 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
2158 BdrvRequestFlags flags,
2159 BlockCompletionFunc *cb,
2164 BlockAIOCBCoroutine *acb;
2166 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2167 acb->need_bh = true;
2168 acb->req.error = -EINPROGRESS;
2169 acb->req.sector = sector_num;
2170 acb->req.nb_sectors = nb_sectors;
2171 acb->req.qiov = qiov;
2172 acb->req.flags = flags;
2173 acb->is_write = is_write;
2175 co = qemu_coroutine_create(bdrv_co_do_rw);
2176 qemu_coroutine_enter(co, acb);
2178 bdrv_co_maybe_schedule_bh(acb);
2179 return &acb->common;
2182 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
2184 BlockAIOCBCoroutine *acb = opaque;
2185 BlockDriverState *bs = acb->common.bs;
2187 acb->req.error = bdrv_co_flush(bs);
2188 bdrv_co_complete(acb);
2191 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2192 BlockCompletionFunc *cb, void *opaque)
2194 trace_bdrv_aio_flush(bs, opaque);
2197 BlockAIOCBCoroutine *acb;
2199 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2200 acb->need_bh = true;
2201 acb->req.error = -EINPROGRESS;
2203 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
2204 qemu_coroutine_enter(co, acb);
2206 bdrv_co_maybe_schedule_bh(acb);
2207 return &acb->common;
2210 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
2212 BlockAIOCBCoroutine *acb = opaque;
2213 BlockDriverState *bs = acb->common.bs;
2215 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
2216 bdrv_co_complete(acb);
2219 BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
2220 int64_t sector_num, int nb_sectors,
2221 BlockCompletionFunc *cb, void *opaque)
2224 BlockAIOCBCoroutine *acb;
2226 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
2228 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2229 acb->need_bh = true;
2230 acb->req.error = -EINPROGRESS;
2231 acb->req.sector = sector_num;
2232 acb->req.nb_sectors = nb_sectors;
2233 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
2234 qemu_coroutine_enter(co, acb);
2236 bdrv_co_maybe_schedule_bh(acb);
2237 return &acb->common;
2240 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
2241 BlockCompletionFunc *cb, void *opaque)
2245 acb = g_malloc(aiocb_info->aiocb_size);
2246 acb->aiocb_info = aiocb_info;
2249 acb->opaque = opaque;
2254 void qemu_aio_ref(void *p)
2256 BlockAIOCB *acb = p;
2260 void qemu_aio_unref(void *p)
2262 BlockAIOCB *acb = p;
2263 assert(acb->refcnt > 0);
2264 if (--acb->refcnt == 0) {
2269 /**************************************************************/
2270 /* Coroutine block device emulation */
2272 typedef struct CoroutineIOCompletion {
2273 Coroutine *coroutine;
2275 } CoroutineIOCompletion;
2277 static void bdrv_co_io_em_complete(void *opaque, int ret)
2279 CoroutineIOCompletion *co = opaque;
2282 qemu_coroutine_enter(co->coroutine, NULL);
2285 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
2286 int nb_sectors, QEMUIOVector *iov,
2289 CoroutineIOCompletion co = {
2290 .coroutine = qemu_coroutine_self(),
2295 acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
2296 bdrv_co_io_em_complete, &co);
2298 acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
2299 bdrv_co_io_em_complete, &co);
2302 trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
2306 qemu_coroutine_yield();
2311 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
2312 int64_t sector_num, int nb_sectors,
2315 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
2318 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
2319 int64_t sector_num, int nb_sectors,
2322 return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
2325 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2327 RwCo *rwco = opaque;
2329 rwco->ret = bdrv_co_flush(rwco->bs);
2332 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2335 BdrvTrackedRequest req;
2337 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2342 tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
2343 /* Write back cached data to the OS even with cache=unsafe */
2344 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2345 if (bs->drv->bdrv_co_flush_to_os) {
2346 ret = bs->drv->bdrv_co_flush_to_os(bs);
2352 /* But don't actually force it to the disk with cache=unsafe */
2353 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2357 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2358 if (bs->drv->bdrv_co_flush_to_disk) {
2359 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2360 } else if (bs->drv->bdrv_aio_flush) {
2362 CoroutineIOCompletion co = {
2363 .coroutine = qemu_coroutine_self(),
2366 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2370 qemu_coroutine_yield();
2375 * Some block drivers always operate in either writethrough or unsafe
2376 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2377 * know how the server works (because the behaviour is hardcoded or
2378 * depends on server-side configuration), so we can't ensure that
2379 * everything is safe on disk. Returning an error doesn't work because
2380 * that would break guests even if the server operates in writethrough
2383 * Let's hope the user knows what he's doing.
2391 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2392 * in the case of cache=unsafe, so there are no useless flushes.
2395 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2397 tracked_request_end(&req);
2401 int bdrv_flush(BlockDriverState *bs)
2409 if (qemu_in_coroutine()) {
2410 /* Fast-path if already in coroutine context */
2411 bdrv_flush_co_entry(&rwco);
2413 AioContext *aio_context = bdrv_get_aio_context(bs);
2415 co = qemu_coroutine_create(bdrv_flush_co_entry);
2416 qemu_coroutine_enter(co, &rwco);
2417 while (rwco.ret == NOT_DONE) {
2418 aio_poll(aio_context, true);
2425 typedef struct DiscardCo {
2426 BlockDriverState *bs;
2431 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
2433 DiscardCo *rwco = opaque;
2435 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
2438 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
2441 BdrvTrackedRequest req;
2442 int max_discard, ret;
2448 ret = bdrv_check_request(bs, sector_num, nb_sectors);
2451 } else if (bs->read_only) {
2455 /* Do nothing if disabled. */
2456 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2460 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
2464 tracked_request_begin(&req, bs, sector_num, nb_sectors,
2465 BDRV_TRACKED_DISCARD);
2466 bdrv_set_dirty(bs, sector_num, nb_sectors);
2468 max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
2469 while (nb_sectors > 0) {
2471 int num = nb_sectors;
2474 if (bs->bl.discard_alignment &&
2475 num >= bs->bl.discard_alignment &&
2476 sector_num % bs->bl.discard_alignment) {
2477 if (num > bs->bl.discard_alignment) {
2478 num = bs->bl.discard_alignment;
2480 num -= sector_num % bs->bl.discard_alignment;
2483 /* limit request size */
2484 if (num > max_discard) {
2488 if (bs->drv->bdrv_co_discard) {
2489 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
2492 CoroutineIOCompletion co = {
2493 .coroutine = qemu_coroutine_self(),
2496 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
2497 bdrv_co_io_em_complete, &co);
2502 qemu_coroutine_yield();
2506 if (ret && ret != -ENOTSUP) {
2515 tracked_request_end(&req);
2519 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
2524 .sector_num = sector_num,
2525 .nb_sectors = nb_sectors,
2529 if (qemu_in_coroutine()) {
2530 /* Fast-path if already in coroutine context */
2531 bdrv_discard_co_entry(&rwco);
2533 AioContext *aio_context = bdrv_get_aio_context(bs);
2535 co = qemu_coroutine_create(bdrv_discard_co_entry);
2536 qemu_coroutine_enter(co, &rwco);
2537 while (rwco.ret == NOT_DONE) {
2538 aio_poll(aio_context, true);
2546 CoroutineIOCompletion *co;
2548 } BdrvIoctlCompletionData;
2550 static void bdrv_ioctl_bh_cb(void *opaque)
2552 BdrvIoctlCompletionData *data = opaque;
2554 bdrv_co_io_em_complete(data->co, -ENOTSUP);
2555 qemu_bh_delete(data->bh);
2558 static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
2560 BlockDriver *drv = bs->drv;
2561 BdrvTrackedRequest tracked_req;
2562 CoroutineIOCompletion co = {
2563 .coroutine = qemu_coroutine_self(),
2567 tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
2568 if (!drv || !drv->bdrv_aio_ioctl) {
2573 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2575 BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
2576 data->bh = aio_bh_new(bdrv_get_aio_context(bs),
2577 bdrv_ioctl_bh_cb, data);
2579 qemu_bh_schedule(data->bh);
2581 qemu_coroutine_yield();
2583 tracked_request_end(&tracked_req);
2588 BlockDriverState *bs;
2594 static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
2596 BdrvIoctlCoData *data = opaque;
2597 data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
2600 /* needed for generic scsi interface */
2601 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
2603 BdrvIoctlCoData data = {
2607 .ret = -EINPROGRESS,
2610 if (qemu_in_coroutine()) {
2611 /* Fast-path if already in coroutine context */
2612 bdrv_co_ioctl_entry(&data);
2614 Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);
2615 qemu_coroutine_enter(co, &data);
2617 while (data.ret == -EINPROGRESS) {
2618 aio_poll(bdrv_get_aio_context(bs), true);
2623 static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
2625 BlockAIOCBCoroutine *acb = opaque;
2626 acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
2627 acb->req.req, acb->req.buf);
2628 bdrv_co_complete(acb);
2631 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
2632 unsigned long int req, void *buf,
2633 BlockCompletionFunc *cb, void *opaque)
2635 BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
2639 acb->need_bh = true;
2640 acb->req.error = -EINPROGRESS;
2643 co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
2644 qemu_coroutine_enter(co, acb);
2646 bdrv_co_maybe_schedule_bh(acb);
2647 return &acb->common;
2650 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2652 return qemu_memalign(bdrv_opt_mem_align(bs), size);
2655 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2657 return memset(qemu_blockalign(bs, size), 0, size);
2660 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2662 size_t align = bdrv_opt_mem_align(bs);
2664 /* Ensure that NULL is never returned on success */
2670 return qemu_try_memalign(align, size);
2673 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2675 void *mem = qemu_try_blockalign(bs, size);
2678 memset(mem, 0, size);
2685 * Check if all memory in this vector is sector aligned.
2687 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2690 size_t alignment = bdrv_min_mem_align(bs);
2692 for (i = 0; i < qiov->niov; i++) {
2693 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2696 if (qiov->iov[i].iov_len % alignment) {
2704 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2705 NotifierWithReturn *notifier)
2707 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2710 void bdrv_io_plug(BlockDriverState *bs)
2712 BlockDriver *drv = bs->drv;
2713 if (drv && drv->bdrv_io_plug) {
2714 drv->bdrv_io_plug(bs);
2715 } else if (bs->file) {
2716 bdrv_io_plug(bs->file->bs);
2720 void bdrv_io_unplug(BlockDriverState *bs)
2722 BlockDriver *drv = bs->drv;
2723 if (drv && drv->bdrv_io_unplug) {
2724 drv->bdrv_io_unplug(bs);
2725 } else if (bs->file) {
2726 bdrv_io_unplug(bs->file->bs);
2730 void bdrv_flush_io_queue(BlockDriverState *bs)
2732 BlockDriver *drv = bs->drv;
2733 if (drv && drv->bdrv_flush_io_queue) {
2734 drv->bdrv_flush_io_queue(bs);
2735 } else if (bs->file) {
2736 bdrv_flush_io_queue(bs->file->bs);
2738 bdrv_start_throttled_reqs(bs);
2741 void bdrv_drained_begin(BlockDriverState *bs)
2743 if (!bs->quiesce_counter++) {
2744 aio_disable_external(bdrv_get_aio_context(bs));
2749 void bdrv_drained_end(BlockDriverState *bs)
2751 assert(bs->quiesce_counter > 0);
2752 if (--bs->quiesce_counter > 0) {
2755 aio_enable_external(bdrv_get_aio_context(bs));
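/* Illustrative sketch (not part of the original file): callers bracket code
 * that must not race with new external I/O in a drained section:
 *
 *     bdrv_drained_begin(bs);    // disable external events and drain
 *     ... reconfigure or inspect bs ...
 *     bdrv_drained_end(bs);      // re-enable external events
 */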