2 * Block layer I/O functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 #include "qemu/osdep.h"
27 #include "sysemu/block-backend.h"
28 #include "block/blockjob.h"
29 #include "block/block_int.h"
30 #include "qemu/cutils.h"
31 #include "qapi/error.h"
32 #include "qemu/error-report.h"
34 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
36 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
40 BdrvRequestFlags flags,
41 BlockCompletionFunc *cb,
44 static void coroutine_fn bdrv_co_do_rw(void *opaque);
45 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
46 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
48 static void bdrv_parent_drained_begin(BlockDriverState *bs)
52 QLIST_FOREACH(c, &bs->parents, next_parent) {
53 if (c->role->drained_begin) {
54 c->role->drained_begin(c);
59 static void bdrv_parent_drained_end(BlockDriverState *bs)
63 QLIST_FOREACH(c, &bs->parents, next_parent) {
64 if (c->role->drained_end) {
65 c->role->drained_end(c);
70 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
72 BlockDriver *drv = bs->drv;
73 Error *local_err = NULL;
75 memset(&bs->bl, 0, sizeof(bs->bl));
81 /* Take some limits from the children as a default */
83 bdrv_refresh_limits(bs->file->bs, &local_err);
85 error_propagate(errp, local_err);
88 bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
89 bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
90 bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
91 bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
92 bs->bl.max_iov = bs->file->bs->bl.max_iov;
94 bs->bl.min_mem_alignment = 512;
95 bs->bl.opt_mem_alignment = getpagesize();
97 /* Safe default since most protocols use readv()/writev()/etc */
98 bs->bl.max_iov = IOV_MAX;
102 bdrv_refresh_limits(bs->backing->bs, &local_err);
104 error_propagate(errp, local_err);
107 bs->bl.opt_transfer_length =
108 MAX(bs->bl.opt_transfer_length,
109 bs->backing->bs->bl.opt_transfer_length);
110 bs->bl.max_transfer_length =
111 MIN_NON_ZERO(bs->bl.max_transfer_length,
112 bs->backing->bs->bl.max_transfer_length);
113 bs->bl.opt_mem_alignment =
114 MAX(bs->bl.opt_mem_alignment,
115 bs->backing->bs->bl.opt_mem_alignment);
116 bs->bl.min_mem_alignment =
117 MAX(bs->bl.min_mem_alignment,
118 bs->backing->bs->bl.min_mem_alignment);
121 bs->backing->bs->bl.max_iov);
124 /* Then let the driver override it */
125 if (drv->bdrv_refresh_limits) {
126 drv->bdrv_refresh_limits(bs, errp);
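/* Editor's note (summary of the merging above, not upstream text): limits are
 * combined from the file and backing children before the driver callback runs.
 * Hard maxima (max_transfer_length, max_iov) merge with MIN_NON_ZERO so that a
 * zero ("unlimited") child value does not hide a real limit, while alignments
 * and optimal sizes merge with MAX; the driver's own bdrv_refresh_limits()
 * then gets the last word. */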
131 * The copy-on-read flag is actually a reference count so multiple users may
132 * use the feature without worrying about clobbering its previous state.
133 * Copy-on-read stays enabled until all users have called to disable it.
135 void bdrv_enable_copy_on_read(BlockDriverState *bs)
140 void bdrv_disable_copy_on_read(BlockDriverState *bs)
142 assert(bs->copy_on_read > 0);
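/* Illustrative usage (hypothetical caller, not part of this file): because the
 * flag is a reference count, independent users simply pair the calls and never
 * clobber each other:
 *
 *     bdrv_enable_copy_on_read(bs);     // e.g. a block-stream job starts
 *     ... reads now populate the top image via copy-on-read ...
 *     bdrv_disable_copy_on_read(bs);    // count drops; the feature stays on if
 *                                       // someone else still holds a reference
 */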
146 /* Check if any requests are in-flight (including throttled requests) */
147 bool bdrv_requests_pending(BlockDriverState *bs)
151 if (!QLIST_EMPTY(&bs->tracked_requests)) {
155 QLIST_FOREACH(child, &bs->children, next) {
156 if (bdrv_requests_pending(child->bs)) {
164 static void bdrv_drain_recurse(BlockDriverState *bs)
168 if (bs->drv && bs->drv->bdrv_drain) {
169 bs->drv->bdrv_drain(bs);
171 QLIST_FOREACH(child, &bs->children, next) {
172 bdrv_drain_recurse(child->bs);
178 BlockDriverState *bs;
183 static void bdrv_drain_poll(BlockDriverState *bs)
189 busy = bdrv_requests_pending(bs);
190 busy |= aio_poll(bdrv_get_aio_context(bs), busy);
194 static void bdrv_co_drain_bh_cb(void *opaque)
196 BdrvCoDrainData *data = opaque;
197 Coroutine *co = data->co;
199 qemu_bh_delete(data->bh);
200 bdrv_drain_poll(data->bs);
202 qemu_coroutine_enter(co, NULL);
205 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
207 BdrvCoDrainData data;
209 /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
210 * other coroutines run if they were queued from
211 * qemu_co_queue_run_restart(). */
213 assert(qemu_in_coroutine());
214 data = (BdrvCoDrainData) {
215 .co = qemu_coroutine_self(),
218 .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
220 qemu_bh_schedule(data.bh);
222 qemu_coroutine_yield();
223 /* If we are resumed from some other event (such as an aio completion or a
224 * timer callback), it is a bug in the caller that should be fixed. */
228 void bdrv_drained_begin(BlockDriverState *bs)
230 if (!bs->quiesce_counter++) {
231 aio_disable_external(bdrv_get_aio_context(bs));
232 bdrv_parent_drained_begin(bs);
235 bdrv_io_unplugged_begin(bs);
236 bdrv_drain_recurse(bs);
237 if (qemu_in_coroutine()) {
238 bdrv_co_yield_to_drain(bs);
242 bdrv_io_unplugged_end(bs);
245 void bdrv_drained_end(BlockDriverState *bs)
247 assert(bs->quiesce_counter > 0);
248 if (--bs->quiesce_counter > 0) {
252 bdrv_parent_drained_end(bs);
253 aio_enable_external(bdrv_get_aio_context(bs));
257 * Wait for pending requests to complete on a single BlockDriverState subtree,
258 * and suspend the block driver's internal I/O until the next request arrives.
260 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState's AioContext.
263 * Only this BlockDriverState's AioContext is run, so in-flight requests must
264 * not depend on events in other AioContexts. In that case, use
265 * bdrv_drain_all() instead.
267 void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
269 assert(qemu_in_coroutine());
270 bdrv_drained_begin(bs);
271 bdrv_drained_end(bs);
274 void bdrv_drain(BlockDriverState *bs)
276 bdrv_drained_begin(bs);
277 bdrv_drained_end(bs);
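/* Illustrative usage (hypothetical caller): bdrv_drain() is just a begin/end
 * pair; code that must keep the device quiescent for longer (for example
 * around a graph change) uses the section form directly:
 *
 *     bdrv_drained_begin(bs);
 *     ... no new requests are accepted, pending ones have completed ...
 *     bdrv_drained_end(bs);
 */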
281 * Wait for pending requests to complete across all BlockDriverStates
283 * This function does not flush data to disk, use bdrv_flush_all() for that
284 * after calling this function.
286 void bdrv_drain_all(void)
288 /* Always run first iteration so any pending completion BHs run */
290 BlockDriverState *bs;
292 GSList *aio_ctxs = NULL, *ctx;
294 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
295 AioContext *aio_context = bdrv_get_aio_context(bs);
297 aio_context_acquire(aio_context);
299 block_job_pause(bs->job);
301 bdrv_parent_drained_begin(bs);
302 bdrv_io_unplugged_begin(bs);
303 bdrv_drain_recurse(bs);
304 aio_context_release(aio_context);
306 if (!g_slist_find(aio_ctxs, aio_context)) {
307 aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
311 /* Note that completion of an asynchronous I/O operation can trigger any
312 * number of other I/O operations on other devices---for example a
313 * coroutine can submit an I/O request to another device in response to
314 * request completion. Therefore we must keep looping until there is no
315 * more activity rather than simply draining each device independently.
320 for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
321 AioContext *aio_context = ctx->data;
323 aio_context_acquire(aio_context);
324 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
325 if (aio_context == bdrv_get_aio_context(bs)) {
326 if (bdrv_requests_pending(bs)) {
328 aio_poll(aio_context, busy);
332 busy |= aio_poll(aio_context, false);
333 aio_context_release(aio_context);
337 for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
338 AioContext *aio_context = bdrv_get_aio_context(bs);
340 aio_context_acquire(aio_context);
341 bdrv_io_unplugged_end(bs);
342 bdrv_parent_drained_end(bs);
344 block_job_resume(bs->job);
346 aio_context_release(aio_context);
348 g_slist_free(aio_ctxs);
352 * Remove an active request from the tracked requests list
354 * This function should be called when a tracked request is completing.
356 static void tracked_request_end(BdrvTrackedRequest *req)
358 if (req->serialising) {
359 req->bs->serialising_in_flight--;
362 QLIST_REMOVE(req, list);
363 qemu_co_queue_restart_all(&req->wait_queue);
367 * Add an active request to the tracked requests list
369 static void tracked_request_begin(BdrvTrackedRequest *req,
370 BlockDriverState *bs,
373 enum BdrvTrackedRequestType type)
375 *req = (BdrvTrackedRequest){
380 .co = qemu_coroutine_self(),
381 .serialising = false,
382 .overlap_offset = offset,
383 .overlap_bytes = bytes,
386 qemu_co_queue_init(&req->wait_queue);
388 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
391 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
393 int64_t overlap_offset = req->offset & ~(align - 1);
394 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
397 if (!req->serialising) {
398 req->bs->serialising_in_flight++;
399 req->serialising = true;
402 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
403 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
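/* Worked example (illustrative): with align = 4096, a request of 100 bytes at
 * offset 4100 gets overlap_offset = 4096 and overlap_bytes = 4096, so any
 * other request touching that 4 KiB block counts as overlapping and will be
 * waited for in wait_serialising_requests(). */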
407 * Round a region to cluster boundaries
409 void bdrv_round_to_clusters(BlockDriverState *bs,
410 int64_t sector_num, int nb_sectors,
411 int64_t *cluster_sector_num,
412 int *cluster_nb_sectors)
416 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
417 *cluster_sector_num = sector_num;
418 *cluster_nb_sectors = nb_sectors;
420 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
421 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
422 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
427 static int bdrv_get_cluster_size(BlockDriverState *bs)
432 ret = bdrv_get_info(bs, &bdi);
433 if (ret < 0 || bdi.cluster_size == 0) {
434 return bs->request_alignment;
436 return bdi.cluster_size;
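/* Worked example (illustrative): with a 64 KiB cluster (128 sectors), a read
 * of sectors [130, 132) is rounded out by bdrv_round_to_clusters() to
 * cluster_sector_num = 128, cluster_nb_sectors = 128, i.e. the whole cluster
 * that contains it. */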
440 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
441 int64_t offset, unsigned int bytes)
444 if (offset >= req->overlap_offset + req->overlap_bytes) {
448 if (req->overlap_offset >= offset + bytes) {
454 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
456 BlockDriverState *bs = self->bs;
457 BdrvTrackedRequest *req;
461 if (!bs->serialising_in_flight) {
467 QLIST_FOREACH(req, &bs->tracked_requests, list) {
468 if (req == self || (!req->serialising && !self->serialising)) {
471 if (tracked_request_overlaps(req, self->overlap_offset,
472 self->overlap_bytes))
474 /* Hitting this means there was a reentrant request, for
475 * example, a block driver issuing nested requests. This must
476 * never happen since it means deadlock.
478 assert(qemu_coroutine_self() != req->co);
480 /* If the request is already (indirectly) waiting for us, or
481 * will wait for us as soon as it wakes up, then just go on
482 * (instead of producing a deadlock in the former case). */
483 if (!req->waiting_for) {
484 self->waiting_for = req;
485 qemu_co_queue_wait(&req->wait_queue);
486 self->waiting_for = NULL;
498 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
501 if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
505 if (!bdrv_is_inserted(bs)) {
516 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
519 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
523 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
524 nb_sectors * BDRV_SECTOR_SIZE);
527 typedef struct RwCo {
528 BlockDriverState *bs;
533 BdrvRequestFlags flags;
536 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
540 if (!rwco->is_write) {
541 rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
542 rwco->qiov->size, rwco->qiov,
545 rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
546 rwco->qiov->size, rwco->qiov,
552 * Process a vectored synchronous request using coroutines
554 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
555 QEMUIOVector *qiov, bool is_write,
556 BdrvRequestFlags flags)
563 .is_write = is_write,
568 if (qemu_in_coroutine()) {
569 /* Fast-path if already in coroutine context */
570 bdrv_rw_co_entry(&rwco);
572 AioContext *aio_context = bdrv_get_aio_context(bs);
574 co = qemu_coroutine_create(bdrv_rw_co_entry);
575 qemu_coroutine_enter(co, &rwco);
576 while (rwco.ret == NOT_DONE) {
577 aio_poll(aio_context, true);
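/* Editor's note: this is the synchronous-over-coroutine pattern used
 * throughout this file. If the caller is already a coroutine the request runs
 * in place; otherwise a coroutine is spawned and the caller spins aio_poll()
 * until rwco.ret leaves the NOT_DONE sentinel. bdrv_flush(), bdrv_discard()
 * and bdrv_ioctl() below follow the same shape. */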
584 * Process a synchronous request using coroutines
586 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
587 int nb_sectors, bool is_write, BdrvRequestFlags flags)
591 .iov_base = (void *)buf,
592 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
595 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
599 qemu_iovec_init_external(&qiov, &iov, 1);
600 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
601 &qiov, is_write, flags);
604 /* Return < 0 on error. See bdrv_write() for the return codes */
605 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
606 uint8_t *buf, int nb_sectors)
608 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
611 /* Return < 0 if error. Important errors are:
612 -EIO generic I/O error (may happen for all errors)
613 -ENOMEDIUM No media inserted.
614 -EINVAL Invalid sector number or nb_sectors
615 -EACCES Trying to write a read-only device
617 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
618 const uint8_t *buf, int nb_sectors)
620 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
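/* Illustrative caller (not part of this file): reading one sector with the
 * synchronous sector-based API and checking the error codes listed above.
 *
 *     uint8_t buf[BDRV_SECTOR_SIZE];
 *     int ret = bdrv_read(bs, 0, buf, 1);
 *     if (ret < 0) {
 *         ... handle -EIO / -ENOMEDIUM / -EINVAL / -EACCES ...
 *     }
 */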
623 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
624 int nb_sectors, BdrvRequestFlags flags)
626 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
627 BDRV_REQ_ZERO_WRITE | flags);
631 * Completely zero out a block device with the help of bdrv_write_zeroes.
632 * The operation is sped up by checking the block status and only writing
633 * zeroes to the device if they currently do not return zeroes. Optional
634 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
637 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
639 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
641 int64_t target_sectors, ret, nb_sectors, sector_num = 0;
642 BlockDriverState *file;
645 target_sectors = bdrv_nb_sectors(bs);
646 if (target_sectors < 0) {
647 return target_sectors;
651 nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
652 if (nb_sectors <= 0) {
655 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
657 error_report("error getting block status at sector %" PRId64 ": %s",
658 sector_num, strerror(-ret));
661 if (ret & BDRV_BLOCK_ZERO) {
665 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
667 error_report("error writing zeroes at sector %" PRId64 ": %s",
668 sector_num, strerror(-ret));
675 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
679 .iov_base = (void *)buf,
688 qemu_iovec_init_external(&qiov, &iov, 1);
689 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
697 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
701 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
709 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
710 const void *buf, int bytes)
714 .iov_base = (void *) buf,
722 qemu_iovec_init_external(&qiov, &iov, 1);
723 return bdrv_pwritev(bs, offset, &qiov);
727 * Writes to the file and ensures that no writes are reordered across this
728 * request (acts as a barrier)
730 * Returns 0 on success, -errno in error cases.
732 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
733 const void *buf, int count)
737 ret = bdrv_pwrite(bs, offset, buf, count);
742 ret = bdrv_flush(bs);
750 typedef struct CoroutineIOCompletion {
751 Coroutine *coroutine;
753 } CoroutineIOCompletion;
755 static void bdrv_co_io_em_complete(void *opaque, int ret)
757 CoroutineIOCompletion *co = opaque;
760 qemu_coroutine_enter(co->coroutine, NULL);
763 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
764 uint64_t offset, uint64_t bytes,
765 QEMUIOVector *qiov, int flags)
767 BlockDriver *drv = bs->drv;
769 unsigned int nb_sectors;
771 if (drv->bdrv_co_preadv) {
772 return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
775 sector_num = offset >> BDRV_SECTOR_BITS;
776 nb_sectors = bytes >> BDRV_SECTOR_BITS;
778 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
779 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
780 assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
782 if (drv->bdrv_co_readv) {
783 return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
786 CoroutineIOCompletion co = {
787 .coroutine = qemu_coroutine_self(),
790 acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
791 bdrv_co_io_em_complete, &co);
795 qemu_coroutine_yield();
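/* Editor's note on the dispatch above: the byte-based .bdrv_co_preadv is
 * preferred, then the sector-based .bdrv_co_readv; as a last resort a
 * coroutine interface is emulated on top of .bdrv_aio_readv by yielding here
 * until bdrv_co_io_em_complete() re-enters the coroutine with the result. */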
801 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
802 uint64_t offset, uint64_t bytes,
803 QEMUIOVector *qiov, int flags)
805 BlockDriver *drv = bs->drv;
807 unsigned int nb_sectors;
810 if (drv->bdrv_co_pwritev) {
811 ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
815 sector_num = offset >> BDRV_SECTOR_BITS;
816 nb_sectors = bytes >> BDRV_SECTOR_BITS;
818 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
819 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
820 assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
822 if (drv->bdrv_co_writev_flags) {
823 ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
824 flags & bs->supported_write_flags);
825 flags &= ~bs->supported_write_flags;
826 } else if (drv->bdrv_co_writev) {
827 assert(!bs->supported_write_flags);
828 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
831 CoroutineIOCompletion co = {
832 .coroutine = qemu_coroutine_self(),
835 acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
836 bdrv_co_io_em_complete, &co);
840 qemu_coroutine_yield();
846 if (ret == 0 && (flags & BDRV_REQ_FUA)) {
847 ret = bdrv_co_flush(bs);
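/* Editor's note: if BDRV_REQ_FUA was not accepted natively (it is masked by
 * bs->supported_write_flags above), the flag survives to this point and is
 * emulated with a full flush after the write completes. */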
853 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
854 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
856 /* Perform I/O through a temporary buffer so that users who scribble over
857 * their read buffer while the operation is in progress do not end up
858 * modifying the image file. This is critical for zero-copy guest I/O
859 * where anything might happen inside guest memory.
863 BlockDriver *drv = bs->drv;
865 QEMUIOVector bounce_qiov;
866 int64_t cluster_sector_num;
867 int cluster_nb_sectors;
871 /* Cover the entire cluster so no additional backing file I/O is required when
872 * allocating the cluster in the image file.
874 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
875 &cluster_sector_num, &cluster_nb_sectors);
877 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
878 cluster_sector_num, cluster_nb_sectors);
880 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
881 iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
882 if (bounce_buffer == NULL) {
887 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
889 ret = bdrv_driver_preadv(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
890 cluster_nb_sectors * BDRV_SECTOR_SIZE,
896 if (drv->bdrv_co_write_zeroes &&
897 buffer_is_zero(bounce_buffer, iov.iov_len)) {
898 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
899 cluster_nb_sectors, 0);
901 /* This does not change the data on the disk, it is not necessary
902 * to flush even in cache=writethrough mode.
904 ret = bdrv_driver_pwritev(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
905 cluster_nb_sectors * BDRV_SECTOR_SIZE,
910 /* It might be okay to ignore write errors for guest requests. If this
911 * is a deliberate copy-on-read then we don't want to ignore the error.
912 * Simply report it in all cases.
917 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
918 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
919 nb_sectors * BDRV_SECTOR_SIZE);
922 qemu_vfree(bounce_buffer);
927 * Forwards an already correctly aligned request to the BlockDriver. This
928 * handles copy on read and zeroing after EOF; any other features must be
929 * implemented by the caller.
931 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
932 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
933 int64_t align, QEMUIOVector *qiov, int flags)
937 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
938 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
940 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
941 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
942 assert(!qiov || bytes == qiov->size);
943 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
945 /* Handle Copy on Read and associated serialisation */
946 if (flags & BDRV_REQ_COPY_ON_READ) {
947 /* If we touch the same cluster it counts as an overlap. This
948 * guarantees that allocating writes will be serialized and not race
949 * with each other for the same cluster. For example, in copy-on-read
950 * it ensures that the CoR read and write operations are atomic and
951 * guest writes cannot interleave between them. */
952 mark_request_serialising(req, bdrv_get_cluster_size(bs));
955 if (!(flags & BDRV_REQ_NO_SERIALISING)) {
956 wait_serialising_requests(req);
959 if (flags & BDRV_REQ_COPY_ON_READ) {
962 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
967 if (!ret || pnum != nb_sectors) {
968 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
973 /* Forward the request to the BlockDriver */
974 if (!bs->zero_beyond_eof) {
975 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
977 /* Read zeros after EOF */
978 int64_t total_sectors, max_nb_sectors;
980 total_sectors = bdrv_nb_sectors(bs);
981 if (total_sectors < 0) {
986 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
987 align >> BDRV_SECTOR_BITS);
988 if (nb_sectors < max_nb_sectors) {
989 ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
990 } else if (max_nb_sectors > 0) {
991 QEMUIOVector local_qiov;
993 qemu_iovec_init(&local_qiov, qiov->niov);
994 qemu_iovec_concat(&local_qiov, qiov, 0,
995 max_nb_sectors * BDRV_SECTOR_SIZE);
997 ret = bdrv_driver_preadv(bs, offset,
998 max_nb_sectors * BDRV_SECTOR_SIZE,
1001 qemu_iovec_destroy(&local_qiov);
1006 /* Reading beyond end of file is supposed to produce zeroes */
1007 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
1008 uint64_t offset = MAX(0, total_sectors - sector_num);
1009 uint64_t bytes = (sector_num + nb_sectors - offset) *
1011 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
1020 * Handle a read request in coroutine context
1022 int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
1023 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1024 BdrvRequestFlags flags)
1026 BlockDriver *drv = bs->drv;
1027 BdrvTrackedRequest req;
1029 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
1030 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1031 uint8_t *head_buf = NULL;
1032 uint8_t *tail_buf = NULL;
1033 QEMUIOVector local_qiov;
1034 bool use_local_qiov = false;
1041 ret = bdrv_check_byte_request(bs, offset, bytes);
1046 /* Don't do copy-on-read if we read data before a write operation */
1047 if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
1048 flags |= BDRV_REQ_COPY_ON_READ;
1051 /* Align read if necessary by padding qiov */
1052 if (offset & (align - 1)) {
1053 head_buf = qemu_blockalign(bs, align);
1054 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1055 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1056 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1057 use_local_qiov = true;
1059 bytes += offset & (align - 1);
1060 offset = offset & ~(align - 1);
1063 if ((offset + bytes) & (align - 1)) {
1064 if (!use_local_qiov) {
1065 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1066 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1067 use_local_qiov = true;
1069 tail_buf = qemu_blockalign(bs, align);
1070 qemu_iovec_add(&local_qiov, tail_buf,
1071 align - ((offset + bytes) & (align - 1)));
1073 bytes = ROUND_UP(bytes, align);
1076 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1077 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
1078 use_local_qiov ? &local_qiov : qiov,
1080 tracked_request_end(&req);
1082 if (use_local_qiov) {
1083 qemu_iovec_destroy(&local_qiov);
1084 qemu_vfree(head_buf);
1085 qemu_vfree(tail_buf);
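/* Worked example (illustrative): with request_alignment = 512, a read of 100
 * bytes at offset 700 grows a 188-byte head pad and a 224-byte tail pad, so
 * the driver sees one aligned 512-byte read at offset 512 while the guest
 * buffer still receives only its 100 bytes through local_qiov. */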
1091 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
1092 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1093 BdrvRequestFlags flags)
1095 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1099 return bdrv_co_preadv(bs, sector_num << BDRV_SECTOR_BITS,
1100 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1103 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
1104 int nb_sectors, QEMUIOVector *qiov)
1106 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
1108 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
1111 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
1113 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
1114 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
1116 BlockDriver *drv = bs->drv;
1118 struct iovec iov = {0};
1120 bool need_flush = false;
1122 int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
1123 BDRV_REQUEST_MAX_SECTORS);
1125 while (nb_sectors > 0 && !ret) {
1126 int num = nb_sectors;
1128 /* Align request. Block drivers can expect the "bulk" of the request to be aligned. */
1131 if (bs->bl.write_zeroes_alignment
1132 && num > bs->bl.write_zeroes_alignment) {
1133 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
1134 /* Make a small request up to the first aligned sector. */
1135 num = bs->bl.write_zeroes_alignment;
1136 num -= sector_num % bs->bl.write_zeroes_alignment;
1137 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
1138 /* Shorten the request to the last aligned sector. num cannot
1139 * underflow because num > bs->bl.write_zeroes_alignment.
1141 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
1145 /* limit request size */
1146 if (num > max_write_zeroes) {
1147 num = max_write_zeroes;
1151 /* First try the efficient write zeroes operation */
1152 if (drv->bdrv_co_write_zeroes) {
1153 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num,
1154 flags & bs->supported_zero_flags);
1155 if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1156 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1160 assert(!bs->supported_zero_flags);
1163 if (ret == -ENOTSUP) {
1164 /* Fall back to bounce buffer if write zeroes is unsupported */
1165 int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
1166 MAX_WRITE_ZEROES_BOUNCE_BUFFER);
1167 BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1169 if ((flags & BDRV_REQ_FUA) &&
1170 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1171 /* No need for bdrv_driver_pwritev() to do a fallback
1172 * flush on each chunk; use just one at the end */
1173 write_flags &= ~BDRV_REQ_FUA;
1176 num = MIN(num, max_xfer_len);
1177 iov.iov_len = num * BDRV_SECTOR_SIZE;
1178 if (iov.iov_base == NULL) {
1179 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
1180 if (iov.iov_base == NULL) {
1184 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
1186 qemu_iovec_init_external(&qiov, &iov, 1);
1188 ret = bdrv_driver_pwritev(bs, sector_num * BDRV_SECTOR_SIZE,
1189 num * BDRV_SECTOR_SIZE, &qiov,
1192 /* Keep the bounce buffer around if it is big enough for
1193 * all future requests.
1195 if (num < max_xfer_len) {
1196 qemu_vfree(iov.iov_base);
1197 iov.iov_base = NULL;
1206 if (ret == 0 && need_flush) {
1207 ret = bdrv_co_flush(bs);
1209 qemu_vfree(iov.iov_base);
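/* Editor's note (summary of the loop above): the request is carved so that its
 * bulk is aligned to write_zeroes_alignment, the driver's efficient
 * .bdrv_co_write_zeroes is tried first, and an explicit zeroed bounce buffer
 * (at most MAX_WRITE_ZEROES_BOUNCE_BUFFER sectors per chunk) is used when it
 * returns -ENOTSUP; FUA emulation, when needed, is folded into the single
 * flush issued after the loop. */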
1214 * Forwards an already correctly aligned write request to the BlockDriver.
1216 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
1217 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1218 QEMUIOVector *qiov, int flags)
1220 BlockDriver *drv = bs->drv;
1224 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
1225 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
1227 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1228 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1229 assert(!qiov || bytes == qiov->size);
1230 assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1232 waited = wait_serialising_requests(req);
1233 assert(!waited || !req->serialising);
1234 assert(req->overlap_offset <= offset);
1235 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1237 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1239 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1240 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
1241 qemu_iovec_is_zero(qiov)) {
1242 flags |= BDRV_REQ_ZERO_WRITE;
1243 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1244 flags |= BDRV_REQ_MAY_UNMAP;
1249 /* Do nothing, write notifier decided to fail this request */
1250 } else if (flags & BDRV_REQ_ZERO_WRITE) {
1251 bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1252 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
1254 bdrv_debug_event(bs, BLKDBG_PWRITEV);
1255 ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
1257 bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1259 bdrv_set_dirty(bs, sector_num, nb_sectors);
1261 if (bs->wr_highest_offset < offset + bytes) {
1262 bs->wr_highest_offset = offset + bytes;
1266 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
1272 static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
1275 BdrvRequestFlags flags,
1276 BdrvTrackedRequest *req)
1278 uint8_t *buf = NULL;
1279 QEMUIOVector local_qiov;
1281 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1282 unsigned int head_padding_bytes, tail_padding_bytes;
1285 head_padding_bytes = offset & (align - 1);
1286 tail_padding_bytes = align - ((offset + bytes) & (align - 1));
1289 assert(flags & BDRV_REQ_ZERO_WRITE);
1290 if (head_padding_bytes || tail_padding_bytes) {
1291 buf = qemu_blockalign(bs, align);
1292 iov = (struct iovec) {
1296 qemu_iovec_init_external(&local_qiov, &iov, 1);
1298 if (head_padding_bytes) {
1299 uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1301 /* RMW the unaligned part before head. */
1302 mark_request_serialising(req, align);
1303 wait_serialising_requests(req);
1304 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1305 ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
1306 align, &local_qiov, 0);
1310 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1312 memset(buf + head_padding_bytes, 0, zero_bytes);
1313 ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
1315 flags & ~BDRV_REQ_ZERO_WRITE);
1319 offset += zero_bytes;
1320 bytes -= zero_bytes;
1323 assert(!bytes || (offset & (align - 1)) == 0);
1324 if (bytes >= align) {
1325 /* Write the aligned part in the middle. */
1326 uint64_t aligned_bytes = bytes & ~(align - 1);
1327 ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
1332 bytes -= aligned_bytes;
1333 offset += aligned_bytes;
1336 assert(!bytes || (offset & (align - 1)) == 0);
1338 assert(align == tail_padding_bytes + bytes);
1339 /* RMW the unaligned part after tail. */
1340 mark_request_serialising(req, align);
1341 wait_serialising_requests(req);
1342 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1343 ret = bdrv_aligned_preadv(bs, req, offset, align,
1344 align, &local_qiov, 0);
1348 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1350 memset(buf, 0, bytes);
1351 ret = bdrv_aligned_pwritev(bs, req, offset, align,
1352 &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1361 * Handle a write request in coroutine context
1363 int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
1364 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1365 BdrvRequestFlags flags)
1367 BdrvTrackedRequest req;
1368 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
1369 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1370 uint8_t *head_buf = NULL;
1371 uint8_t *tail_buf = NULL;
1372 QEMUIOVector local_qiov;
1373 bool use_local_qiov = false;
1379 if (bs->read_only) {
1382 assert(!(bs->open_flags & BDRV_O_INACTIVE));
1384 ret = bdrv_check_byte_request(bs, offset, bytes);
1390 * Align write if necessary by performing a read-modify-write cycle.
1391 * Pad qiov with the read parts and be sure to have a tracked request not
1392 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1394 tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1397 ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
1401 if (offset & (align - 1)) {
1402 QEMUIOVector head_qiov;
1403 struct iovec head_iov;
1405 mark_request_serialising(&req, align);
1406 wait_serialising_requests(&req);
1408 head_buf = qemu_blockalign(bs, align);
1409 head_iov = (struct iovec) {
1410 .iov_base = head_buf,
1413 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1415 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1416 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
1417 align, &head_qiov, 0);
1421 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1423 qemu_iovec_init(&local_qiov, qiov->niov + 2);
1424 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1425 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1426 use_local_qiov = true;
1428 bytes += offset & (align - 1);
1429 offset = offset & ~(align - 1);
1432 if ((offset + bytes) & (align - 1)) {
1433 QEMUIOVector tail_qiov;
1434 struct iovec tail_iov;
1438 mark_request_serialising(&req, align);
1439 waited = wait_serialising_requests(&req);
1440 assert(!waited || !use_local_qiov);
1442 tail_buf = qemu_blockalign(bs, align);
1443 tail_iov = (struct iovec) {
1444 .iov_base = tail_buf,
1447 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1449 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1450 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
1451 align, &tail_qiov, 0);
1455 bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1457 if (!use_local_qiov) {
1458 qemu_iovec_init(&local_qiov, qiov->niov + 1);
1459 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1460 use_local_qiov = true;
1463 tail_bytes = (offset + bytes) & (align - 1);
1464 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1466 bytes = ROUND_UP(bytes, align);
1469 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
1470 use_local_qiov ? &local_qiov : qiov,
1475 if (use_local_qiov) {
1476 qemu_iovec_destroy(&local_qiov);
1478 qemu_vfree(head_buf);
1479 qemu_vfree(tail_buf);
1481 tracked_request_end(&req);
1485 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
1486 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1487 BdrvRequestFlags flags)
1489 if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1493 return bdrv_co_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
1494 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1497 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
1498 int nb_sectors, QEMUIOVector *qiov)
1500 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
1502 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
1505 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
1506 int64_t sector_num, int nb_sectors,
1507 BdrvRequestFlags flags)
1509 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
1511 if (!(bs->open_flags & BDRV_O_UNMAP)) {
1512 flags &= ~BDRV_REQ_MAY_UNMAP;
1515 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
1516 BDRV_REQ_ZERO_WRITE | flags);
1519 typedef struct BdrvCoGetBlockStatusData {
1520 BlockDriverState *bs;
1521 BlockDriverState *base;
1522 BlockDriverState **file;
1528 } BdrvCoGetBlockStatusData;
1531 * Returns the allocation status of the specified sectors.
1532 * Drivers not implementing the functionality are assumed to not support
1533 * backing files, hence all their sectors are reported as allocated.
1535 * If 'sector_num' is beyond the end of the disk image the return value is 0
1536 * and 'pnum' is set to 0.
1538 * 'pnum' is set to the number of sectors (including and immediately following
1539 * the specified sector) that are known to be in the same
1540 * allocated/unallocated state.
1542 * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
1543 * beyond the end of the disk image it will be clamped.
1545 * If the returned value is positive and the BDRV_BLOCK_OFFSET_VALID bit is set,
1546 * 'file' points to the BDS in which the sector range is allocated.
1548 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
1550 int nb_sectors, int *pnum,
1551 BlockDriverState **file)
1553 int64_t total_sectors;
1557 total_sectors = bdrv_nb_sectors(bs);
1558 if (total_sectors < 0) {
1559 return total_sectors;
1562 if (sector_num >= total_sectors) {
1567 n = total_sectors - sector_num;
1568 if (n < nb_sectors) {
1572 if (!bs->drv->bdrv_co_get_block_status) {
1574 ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1575 if (bs->drv->protocol_name) {
1576 ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
1582 ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
1589 if (ret & BDRV_BLOCK_RAW) {
1590 assert(ret & BDRV_BLOCK_OFFSET_VALID);
1591 return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
1595 if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
1596 ret |= BDRV_BLOCK_ALLOCATED;
1598 if (bdrv_unallocated_blocks_are_zero(bs)) {
1599 ret |= BDRV_BLOCK_ZERO;
1600 } else if (bs->backing) {
1601 BlockDriverState *bs2 = bs->backing->bs;
1602 int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
1603 if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
1604 ret |= BDRV_BLOCK_ZERO;
1609 if (*file && *file != bs &&
1610 (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
1611 (ret & BDRV_BLOCK_OFFSET_VALID)) {
1612 BlockDriverState *file2;
1615 ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
1616 *pnum, &file_pnum, &file2);
1618 /* Ignore errors. This is just providing extra information, it
1619 * is useful but not necessary.
1622 /* !file_pnum indicates an offset at or beyond the EOF; it is
1623 * perfectly valid for the format block driver to point to such
1624 * offsets, so catch it and mark everything as zero */
1625 ret |= BDRV_BLOCK_ZERO;
1627 /* Limit request to the range reported by the protocol driver */
1629 ret |= (ret2 & BDRV_BLOCK_ZERO);
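/* Editor's note: when BDRV_BLOCK_OFFSET_VALID is set, the return value carries
 * the host offset of the data in its upper bits (BDRV_BLOCK_OFFSET_MASK)
 * alongside the low flag bits, which is why the recursive calls above shift
 * the value by BDRV_SECTOR_BITS to obtain a sector number in *file. */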
1637 static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1638 BlockDriverState *base,
1642 BlockDriverState **file)
1644 BlockDriverState *p;
1648 for (p = bs; p != base; p = backing_bs(p)) {
1649 ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
1650 if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
1653 /* [sector_num, pnum] unallocated on this layer, which could be only
1654 * the first part of [sector_num, nb_sectors]. */
1655 nb_sectors = MIN(nb_sectors, *pnum);
1660 /* Coroutine wrapper for bdrv_get_block_status_above() */
1661 static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
1663 BdrvCoGetBlockStatusData *data = opaque;
1665 data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1674 * Synchronous wrapper around bdrv_co_get_block_status_above().
1676 * See bdrv_co_get_block_status_above() for details.
1678 int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1679 BlockDriverState *base,
1681 int nb_sectors, int *pnum,
1682 BlockDriverState **file)
1685 BdrvCoGetBlockStatusData data = {
1689 .sector_num = sector_num,
1690 .nb_sectors = nb_sectors,
1695 if (qemu_in_coroutine()) {
1696 /* Fast-path if already in coroutine context */
1697 bdrv_get_block_status_above_co_entry(&data);
1699 AioContext *aio_context = bdrv_get_aio_context(bs);
1701 co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
1702 qemu_coroutine_enter(co, &data);
1703 while (!data.done) {
1704 aio_poll(aio_context, true);
1710 int64_t bdrv_get_block_status(BlockDriverState *bs,
1712 int nb_sectors, int *pnum,
1713 BlockDriverState **file)
1715 return bdrv_get_block_status_above(bs, backing_bs(bs),
1716 sector_num, nb_sectors, pnum, file);
1719 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
1720 int nb_sectors, int *pnum)
1722 BlockDriverState *file;
1723 int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
1728 return !!(ret & BDRV_BLOCK_ALLOCATED);
1732 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1734 * Return true if the given sector is allocated in any image between
1735 * BASE and TOP (inclusive). BASE can be NULL to check if the given
1736 * sector is allocated in any image of the chain. Return false otherwise.
1738 * 'pnum' is set to the number of sectors (including and immediately following
1739 * the specified sector) that are known to be in the same
1740 * allocated/unallocated state.
1743 int bdrv_is_allocated_above(BlockDriverState *top,
1744 BlockDriverState *base,
1746 int nb_sectors, int *pnum)
1748 BlockDriverState *intermediate;
1749 int ret, n = nb_sectors;
1752 while (intermediate && intermediate != base) {
1754 ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
1764 * [sector_num, nb_sectors] is unallocated on top, but an intermediate
1767 * image might have [sector_num+x, nb_sectors] allocated.
1769 if (n > pnum_inter &&
1770 (intermediate == top ||
1771 sector_num + pnum_inter < intermediate->total_sectors)) {
1775 intermediate = backing_bs(intermediate);
1782 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
1783 const uint8_t *buf, int nb_sectors)
1785 BlockDriver *drv = bs->drv;
1791 if (!drv->bdrv_write_compressed) {
1794 ret = bdrv_check_request(bs, sector_num, nb_sectors);
1799 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
1801 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
1804 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
1805 int64_t pos, int size)
1808 struct iovec iov = {
1809 .iov_base = (void *) buf,
1813 qemu_iovec_init_external(&qiov, &iov, 1);
1814 return bdrv_writev_vmstate(bs, &qiov, pos);
1817 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
1819 BlockDriver *drv = bs->drv;
1823 } else if (drv->bdrv_save_vmstate) {
1824 return drv->bdrv_save_vmstate(bs, qiov, pos);
1825 } else if (bs->file) {
1826 return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
1832 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1833 int64_t pos, int size)
1835 BlockDriver *drv = bs->drv;
1838 if (drv->bdrv_load_vmstate)
1839 return drv->bdrv_load_vmstate(bs, buf, pos, size);
1841 return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
1845 /**************************************************************/
1848 BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
1849 QEMUIOVector *qiov, int nb_sectors,
1850 BlockCompletionFunc *cb, void *opaque)
1852 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
1854 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1858 BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
1859 QEMUIOVector *qiov, int nb_sectors,
1860 BlockCompletionFunc *cb, void *opaque)
1862 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
1864 return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1868 void bdrv_aio_cancel(BlockAIOCB *acb)
1871 bdrv_aio_cancel_async(acb);
1872 while (acb->refcnt > 1) {
1873 if (acb->aiocb_info->get_aio_context) {
1874 aio_poll(acb->aiocb_info->get_aio_context(acb), true);
1875 } else if (acb->bs) {
1876 aio_poll(bdrv_get_aio_context(acb->bs), true);
1881 qemu_aio_unref(acb);
1884 /* Async version of aio cancel. The caller is not blocked if the acb implements
1885 * cancel_async; otherwise we do nothing and let the request complete normally.
1886 * In either case the completion callback must be called. */
1887 void bdrv_aio_cancel_async(BlockAIOCB *acb)
1889 if (acb->aiocb_info->cancel_async) {
1890 acb->aiocb_info->cancel_async(acb);
1894 /**************************************************************/
1895 /* async block device emulation */
1897 typedef struct BlockAIOCBCoroutine {
1904 } BlockAIOCBCoroutine;
1906 static const AIOCBInfo bdrv_em_co_aiocb_info = {
1907 .aiocb_size = sizeof(BlockAIOCBCoroutine),
1910 static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
1912 if (!acb->need_bh) {
1913 acb->common.cb(acb->common.opaque, acb->req.error);
1914 qemu_aio_unref(acb);
1918 static void bdrv_co_em_bh(void *opaque)
1920 BlockAIOCBCoroutine *acb = opaque;
1922 assert(!acb->need_bh);
1923 qemu_bh_delete(acb->bh);
1924 bdrv_co_complete(acb);
1927 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
1929 acb->need_bh = false;
1930 if (acb->req.error != -EINPROGRESS) {
1931 BlockDriverState *bs = acb->common.bs;
1933 acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
1934 qemu_bh_schedule(acb->bh);
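/* Editor's note: need_bh ensures the completion callback never runs before the
 * emulated AIO function has returned its BlockAIOCB. If the coroutine finishes
 * synchronously, bdrv_co_complete() defers to the bottom half scheduled here
 * instead of calling the callback directly. */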
1938 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
1939 static void coroutine_fn bdrv_co_do_rw(void *opaque)
1941 BlockAIOCBCoroutine *acb = opaque;
1942 BlockDriverState *bs = acb->common.bs;
1944 if (!acb->is_write) {
1945 acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
1946 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
1948 acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
1949 acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
1952 bdrv_co_complete(acb);
1955 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
1959 BdrvRequestFlags flags,
1960 BlockCompletionFunc *cb,
1965 BlockAIOCBCoroutine *acb;
1967 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
1968 acb->need_bh = true;
1969 acb->req.error = -EINPROGRESS;
1970 acb->req.sector = sector_num;
1971 acb->req.nb_sectors = nb_sectors;
1972 acb->req.qiov = qiov;
1973 acb->req.flags = flags;
1974 acb->is_write = is_write;
1976 co = qemu_coroutine_create(bdrv_co_do_rw);
1977 qemu_coroutine_enter(co, acb);
1979 bdrv_co_maybe_schedule_bh(acb);
1980 return &acb->common;
1983 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
1985 BlockAIOCBCoroutine *acb = opaque;
1986 BlockDriverState *bs = acb->common.bs;
1988 acb->req.error = bdrv_co_flush(bs);
1989 bdrv_co_complete(acb);
1992 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
1993 BlockCompletionFunc *cb, void *opaque)
1995 trace_bdrv_aio_flush(bs, opaque);
1998 BlockAIOCBCoroutine *acb;
2000 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2001 acb->need_bh = true;
2002 acb->req.error = -EINPROGRESS;
2004 co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
2005 qemu_coroutine_enter(co, acb);
2007 bdrv_co_maybe_schedule_bh(acb);
2008 return &acb->common;
2011 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
2013 BlockAIOCBCoroutine *acb = opaque;
2014 BlockDriverState *bs = acb->common.bs;
2016 acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
2017 bdrv_co_complete(acb);
2020 BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
2021 int64_t sector_num, int nb_sectors,
2022 BlockCompletionFunc *cb, void *opaque)
2025 BlockAIOCBCoroutine *acb;
2027 trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
2029 acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2030 acb->need_bh = true;
2031 acb->req.error = -EINPROGRESS;
2032 acb->req.sector = sector_num;
2033 acb->req.nb_sectors = nb_sectors;
2034 co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
2035 qemu_coroutine_enter(co, acb);
2037 bdrv_co_maybe_schedule_bh(acb);
2038 return &acb->common;
2041 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
2042 BlockCompletionFunc *cb, void *opaque)
2046 acb = g_malloc(aiocb_info->aiocb_size);
2047 acb->aiocb_info = aiocb_info;
2050 acb->opaque = opaque;
2055 void qemu_aio_ref(void *p)
2057 BlockAIOCB *acb = p;
2061 void qemu_aio_unref(void *p)
2063 BlockAIOCB *acb = p;
2064 assert(acb->refcnt > 0);
2065 if (--acb->refcnt == 0) {
2070 /**************************************************************/
2071 /* Coroutine block device emulation */
2073 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2075 RwCo *rwco = opaque;
2077 rwco->ret = bdrv_co_flush(rwco->bs);
2080 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2083 BdrvTrackedRequest req;
2085 if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2090 tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
2092 /* Write back all layers by calling one driver function */
2093 if (bs->drv->bdrv_co_flush) {
2094 ret = bs->drv->bdrv_co_flush(bs);
2098 /* Write back cached data to the OS even with cache=unsafe */
2099 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2100 if (bs->drv->bdrv_co_flush_to_os) {
2101 ret = bs->drv->bdrv_co_flush_to_os(bs);
2107 /* But don't actually force it to the disk with cache=unsafe */
2108 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2112 BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2113 if (bs->drv->bdrv_co_flush_to_disk) {
2114 ret = bs->drv->bdrv_co_flush_to_disk(bs);
2115 } else if (bs->drv->bdrv_aio_flush) {
2117 CoroutineIOCompletion co = {
2118 .coroutine = qemu_coroutine_self(),
2121 acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2125 qemu_coroutine_yield();
2130 * Some block drivers always operate in either writethrough or unsafe
2131 * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2132 * know how the server works (because the behaviour is hardcoded or
2133 * depends on server-side configuration), so we can't ensure that
2134 * everything is safe on disk. Returning an error doesn't work because
2135 * that would break guests even if the server operates in writethrough mode.
2138 * Let's hope the user knows what he's doing.
2146 /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
2147 * in the case of cache=unsafe, so there are no useless flushes.
2150 ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2152 tracked_request_end(&req);
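/* Editor's note (summary of the cascade above): a driver implementing
 * .bdrv_co_flush writes back every layer in one call; otherwise data is first
 * flushed to the OS, then (unless BDRV_O_NO_FLUSH) to the disk via
 * .bdrv_co_flush_to_disk or the AIO variant, and finally the protocol layer
 * underneath (bs->file) is flushed recursively. */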
2156 int bdrv_flush(BlockDriverState *bs)
2164 if (qemu_in_coroutine()) {
2165 /* Fast-path if already in coroutine context */
2166 bdrv_flush_co_entry(&rwco);
2168 AioContext *aio_context = bdrv_get_aio_context(bs);
2170 co = qemu_coroutine_create(bdrv_flush_co_entry);
2171 qemu_coroutine_enter(co, &rwco);
2172 while (rwco.ret == NOT_DONE) {
2173 aio_poll(aio_context, true);
2180 typedef struct DiscardCo {
2181 BlockDriverState *bs;
2186 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
2188 DiscardCo *rwco = opaque;
2190 rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
2193 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
2196 BdrvTrackedRequest req;
2197 int max_discard, ret;
2203 ret = bdrv_check_request(bs, sector_num, nb_sectors);
2206 } else if (bs->read_only) {
2209 assert(!(bs->open_flags & BDRV_O_INACTIVE));
2211 /* Do nothing if disabled. */
2212 if (!(bs->open_flags & BDRV_O_UNMAP)) {
2216 if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
2220 tracked_request_begin(&req, bs, sector_num, nb_sectors,
2221 BDRV_TRACKED_DISCARD);
2222 bdrv_set_dirty(bs, sector_num, nb_sectors);
2224 max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
2225 while (nb_sectors > 0) {
2227 int num = nb_sectors;
2230 if (bs->bl.discard_alignment &&
2231 num >= bs->bl.discard_alignment &&
2232 sector_num % bs->bl.discard_alignment) {
2233 if (num > bs->bl.discard_alignment) {
2234 num = bs->bl.discard_alignment;
2236 num -= sector_num % bs->bl.discard_alignment;
2239 /* limit request size */
2240 if (num > max_discard) {
2244 if (bs->drv->bdrv_co_discard) {
2245 ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
2248 CoroutineIOCompletion co = {
2249 .coroutine = qemu_coroutine_self(),
2252 acb = bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors,
2253 bdrv_co_io_em_complete, &co);
2258 qemu_coroutine_yield();
2262 if (ret && ret != -ENOTSUP) {
2271 tracked_request_end(&req);
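/* Editor's note: discard is treated as advisory here; drivers without a
 * discard callback, and drivers returning -ENOTSUP for a chunk, do not fail
 * the request, and only real I/O errors are reported to the caller. */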
2275 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
2280 .sector_num = sector_num,
2281 .nb_sectors = nb_sectors,
2285 if (qemu_in_coroutine()) {
2286 /* Fast-path if already in coroutine context */
2287 bdrv_discard_co_entry(&rwco);
2289 AioContext *aio_context = bdrv_get_aio_context(bs);
2291 co = qemu_coroutine_create(bdrv_discard_co_entry);
2292 qemu_coroutine_enter(co, &rwco);
2293 while (rwco.ret == NOT_DONE) {
2294 aio_poll(aio_context, true);
2302 CoroutineIOCompletion *co;
2304 } BdrvIoctlCompletionData;
2306 static void bdrv_ioctl_bh_cb(void *opaque)
2308 BdrvIoctlCompletionData *data = opaque;
2310 bdrv_co_io_em_complete(data->co, -ENOTSUP);
2311 qemu_bh_delete(data->bh);
2314 static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
2316 BlockDriver *drv = bs->drv;
2317 BdrvTrackedRequest tracked_req;
2318 CoroutineIOCompletion co = {
2319 .coroutine = qemu_coroutine_self(),
2323 tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
2324 if (!drv || !drv->bdrv_aio_ioctl) {
2329 acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2331 BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
2332 data->bh = aio_bh_new(bdrv_get_aio_context(bs),
2333 bdrv_ioctl_bh_cb, data);
2335 qemu_bh_schedule(data->bh);
2337 qemu_coroutine_yield();
2339 tracked_request_end(&tracked_req);
2344 BlockDriverState *bs;
2350 static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
2352 BdrvIoctlCoData *data = opaque;
2353 data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
2356 /* needed for generic scsi interface */
2357 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
2359 BdrvIoctlCoData data = {
2363 .ret = -EINPROGRESS,
2366 if (qemu_in_coroutine()) {
2367 /* Fast-path if already in coroutine context */
2368 bdrv_co_ioctl_entry(&data);
2370 Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);
2372 qemu_coroutine_enter(co, &data);
2373 while (data.ret == -EINPROGRESS) {
2374 aio_poll(bdrv_get_aio_context(bs), true);
2380 static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
2382 BlockAIOCBCoroutine *acb = opaque;
2383 acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
2384 acb->req.req, acb->req.buf);
2385 bdrv_co_complete(acb);
2388 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
2389 unsigned long int req, void *buf,
2390 BlockCompletionFunc *cb, void *opaque)
2392 BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
2396 acb->need_bh = true;
2397 acb->req.error = -EINPROGRESS;
2400 co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
2401 qemu_coroutine_enter(co, acb);
2403 bdrv_co_maybe_schedule_bh(acb);
2404 return &acb->common;
2407 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2409 return qemu_memalign(bdrv_opt_mem_align(bs), size);
2412 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2414 return memset(qemu_blockalign(bs, size), 0, size);
2417 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2419 size_t align = bdrv_opt_mem_align(bs);
2421 /* Ensure that NULL is never returned on success */
2427 return qemu_try_memalign(align, size);
2430 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2432 void *mem = qemu_try_blockalign(bs, size);
2435 memset(mem, 0, size);
2442 * Check if all memory in this vector meets the device's minimum memory alignment.
2444 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2447 size_t alignment = bdrv_min_mem_align(bs);
2449 for (i = 0; i < qiov->niov; i++) {
2450 if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2453 if (qiov->iov[i].iov_len % alignment) {
2461 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2462 NotifierWithReturn *notifier)
2464 notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2467 void bdrv_io_plug(BlockDriverState *bs)
2471 QLIST_FOREACH(child, &bs->children, next) {
2472 bdrv_io_plug(child->bs);
2475 if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
2476 BlockDriver *drv = bs->drv;
2477 if (drv && drv->bdrv_io_plug) {
2478 drv->bdrv_io_plug(bs);
2483 void bdrv_io_unplug(BlockDriverState *bs)
2487 assert(bs->io_plugged);
2488 if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
2489 BlockDriver *drv = bs->drv;
2490 if (drv && drv->bdrv_io_unplug) {
2491 drv->bdrv_io_unplug(bs);
2495 QLIST_FOREACH(child, &bs->children, next) {
2496 bdrv_io_unplug(child->bs);
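/* Editor's note: plugging lets callers batch several submissions before the
 * driver kicks the backend (useful e.g. for Linux AIO style batching), while
 * bdrv_io_unplugged_begin()/end() below temporarily force the tree unplugged;
 * the drain code above uses that so already queued requests are actually
 * submitted and can complete. */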
2500 void bdrv_io_unplugged_begin(BlockDriverState *bs)
2504 if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
2505 BlockDriver *drv = bs->drv;
2506 if (drv && drv->bdrv_io_unplug) {
2507 drv->bdrv_io_unplug(bs);
2511 QLIST_FOREACH(child, &bs->children, next) {
2512 bdrv_io_unplugged_begin(child->bs);
2516 void bdrv_io_unplugged_end(BlockDriverState *bs)
2520 assert(bs->io_plug_disabled);
2521 QLIST_FOREACH(child, &bs->children, next) {
2522 bdrv_io_unplugged_end(child->bs);
2525 if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
2526 BlockDriver *drv = bs->drv;
2527 if (drv && drv->bdrv_io_plug) {
2528 drv->bdrv_io_plug(bs);