/*
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link; /* for blk_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;

    /* the block size for which the guest device expects atomicity */

    /* If the BDS tree is removed, some of its options are stored here (which
     * can be used to restore those options in the new BDS on insert) */
    BlockBackendRootState root_state;

    /* I/O stats (display with "info blockstats"). */

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

typedef struct BlockBackendAIOCB {

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),

static void drive_info_del(DriveInfo *dinfo);

/* All the BlockBackends (except for hidden ones) */
static QTAILQ_HEAD(, BlockBackend) blk_backends =
    QTAILQ_HEAD_INITIALIZER(blk_backends);

/*
 * Create a new BlockBackend with @name, with a reference count of one.
 * @name must not be null or empty.
 * Fail if a BlockBackend with this name already exists.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(const char *name, Error **errp)
    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
    if (bdrv_find_node(name)) {
                   "Device name '%s' conflicts with an existing node name",
    blk = g_new0(BlockBackend, 1);
    blk->name = g_strdup(name);
    QTAILQ_INSERT_TAIL(&blk_backends, blk, link);
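
/*
 * Illustrative usage sketch (not part of this file): creating a named
 * backend and handling the failure case.  The name "drive0" is only an
 * example.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new("drive0", &local_err);
 *
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     ...
 *     blk_unref(blk);
 */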

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(const char *name, Error **errp)
    BlockDriverState *bs;
    blk = blk_new(name, errp);
    bs = bdrv_new_root();

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *name, const char *filename,
                           const char *reference, QDict *options, int flags,
    blk = blk_new_with_bs(name, errp);
    ret = bdrv_open(&blk->bs, filename, reference, options, flags, errp);
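
/*
 * Illustrative usage sketch (not part of this file): opening an image through
 * the convenience wrapper.  The file name and flags are hypothetical.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("drive0", "disk.qcow2", NULL,
 *                                      NULL, BDRV_O_RDWR, &local_err);
 *
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */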

static void blk_delete(BlockBackend *blk)
    assert(!blk->refcnt);
    assert(blk->bs->blk == blk);
    if (blk->root_state.throttle_state) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
    QTAILQ_REMOVE(&blk_backends, blk, link);
    drive_info_del(blk->legacy_dinfo);
    block_acct_cleanup(&blk->stats);

static void drive_info_del(DriveInfo *dinfo)
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);

int blk_get_refcnt(BlockBackend *blk)
    return blk ? blk->refcnt : 0;

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
    assert(blk->refcnt > 0);
    if (!--blk->refcnt) {

/*
 * Return the BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 */
BlockBackend *blk_next(BlockBackend *blk)
    return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&blk_backends);

/*
 * Return @blk's name, a non-null string.
 * Wart: the name is empty iff @blk has been hidden with
 * blk_hide_on_behalf_of_hmp_drive_del().
 */
const char *blk_name(BlockBackend *blk)

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (!strcmp(name, blk->name)) {

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)

/*
 * Changes the BlockDriverState attached to @blk
 */
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
    assert(bs->blk == NULL);

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
    return blk->legacy_dinfo;

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (blk->legacy_dinfo == dinfo) {

/*
 * @blk must not have been hidden already.
 * Make attached BlockDriverState, if any, anonymous.
 * Once hidden, @blk is invisible to all functions that don't receive
 * it as argument. For example, blk_by_name() won't return it.
 * Strictly for use by do_drive_del().
 * TODO get rid of it!
 */
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
    QTAILQ_REMOVE(&blk_backends, blk, link);
        bdrv_make_anon(blk->bs);

/*
 * Disassociates the currently associated BlockDriverState from @blk.
 */
void blk_remove_bs(BlockBackend *blk)
    blk_update_root_state(blk);

/*
 * Associates a new BlockDriverState with @blk.
 */
void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
    assert(!blk->bs && !bs->blk);

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
    blk_iostatus_reset(blk);

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
    if (blk_attach_dev(blk, dev) < 0) {

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
    assert(blk->dev == dev);
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
    blk->dev_opaque = opaque;
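
/*
 * Illustrative sketch (not part of this file): a device model wiring up its
 * callbacks after attaching to a backend.  my_dev_ops, my_change_media_cb
 * and my_resize_cb are hypothetical names.
 *
 *     static const BlockDevOps my_dev_ops = {
 *         .change_media_cb = my_change_media_cb,
 *         .resize_cb       = my_resize_cb,
 *     };
 *
 *     if (blk_attach_dev(blk, dev) < 0) {
 *         ...handle -EBUSY...
 *     }
 *     blk_set_dev_ops(blk, &my_dev_ops, dev);
 */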

/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_open, tray_is_open;

        tray_was_open = blk_dev_is_tray_open(blk);
        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        tray_is_open = blk_dev_is_tray_open(blk);

        if (tray_was_open != tray_is_open) {
            qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
    if (blk->dev_ops && blk->dev_ops->is_tray_open) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);

void blk_iostatus_enable(BlockBackend *blk)
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
    return blk->iostatus;

void blk_iostatus_disable(BlockBackend *blk)
    blk->iostatus_enabled = false;

void blk_iostatus_reset(BlockBackend *blk)
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (blk->bs && blk->bs->job) {
            block_job_iostatus_reset(blk->bs->job);

void blk_iostatus_set_err(BlockBackend *blk, int error)
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
    if (size > INT_MAX) {
    if (!blk_is_available(blk)) {
    len = blk_getlength(blk);
    if (offset > len || len - offset < size) {

static int blk_check_request(BlockBackend *blk, int64_t sector_num,
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
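
/*
 * For example, a request for 8 sectors starting at sector 16 is checked as
 * the byte range [16 * 512, 16 * 512 + 8 * 512) = [8192, 12288) against the
 * backend's current length (BDRV_SECTOR_SIZE is 512).
 */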

int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);

static void error_callback_bh(void *opaque)
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);

BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
                                  BlockCompletionFunc *cb,
                                  void *opaque, int ret)
    struct BlockBackendAIOCB *acb;
    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    qemu_bh_schedule(bh);

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
    int ret = blk_check_request(blk, sector_num, nb_sectors);
        return blk_abort_aio_request(blk, cb, opaque, ret);
    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
    int ret = blk_check_byte_request(blk, offset, count);
    return bdrv_pread(blk->bs, offset, buf, count);

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
    int ret = blk_check_byte_request(blk, offset, count);
    return bdrv_pwrite(blk->bs, offset, buf, count);
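
/*
 * Illustrative sketch (not part of this file): byte-granularity access
 * through the backend.  The offset and buffer are hypothetical.
 *
 *     uint8_t buf[512];
 *     int ret = blk_pread(blk, 0, buf, sizeof(buf));
 *
 *     if (ret < 0) {
 *         ...handle error, e.g. -ENOMEDIUM or -EIO...
 *     }
 */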

int64_t blk_getlength(BlockBackend *blk)
    if (!blk_is_available(blk)) {
    return bdrv_getlength(blk->bs);

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
        bdrv_get_geometry(blk->bs, nb_sectors_ptr);

int64_t blk_nb_sectors(BlockBackend *blk)
    if (!blk_is_available(blk)) {
    return bdrv_nb_sectors(blk->bs);

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
    int ret = blk_check_request(blk, sector_num, nb_sectors);
        return blk_abort_aio_request(blk, cb, opaque, ret);
    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
    int ret = blk_check_request(blk, sector_num, nb_sectors);
        return blk_abort_aio_request(blk, cb, opaque, ret);
    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    return bdrv_aio_flush(blk->bs, cb, opaque);

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
    int ret = blk_check_request(blk, sector_num, nb_sectors);
        return blk_abort_aio_request(blk, cb, opaque, ret);
    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
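
/*
 * Illustrative sketch (not part of this file): submitting an asynchronous
 * read and consuming the result in a completion callback.  my_read_cb and
 * the request layout are hypothetical.
 *
 *     static void my_read_cb(void *opaque, int ret)
 *     {
 *         if (ret < 0) {
 *             ...handle I/O error...
 *         }
 *     }
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     blk_aio_readv(blk, sector_num, &qiov, nb_sectors, my_read_cb, opaque);
 */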

void blk_aio_cancel(BlockAIOCB *acb)
    bdrv_aio_cancel(acb);

void blk_aio_cancel_async(BlockAIOCB *acb)
    bdrv_aio_cancel_async(acb);

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
    if (!blk_is_available(blk)) {
    return bdrv_ioctl(blk->bs, req, buf);

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
    if (!blk_is_available(blk)) {
        return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);

int blk_co_flush(BlockBackend *blk)
    if (!blk_is_available(blk)) {
    return bdrv_co_flush(blk->bs);

int blk_flush(BlockBackend *blk)
    if (!blk_is_available(blk)) {
    return bdrv_flush(blk->bs);

int blk_flush_all(void)
    return bdrv_flush_all();

void blk_drain(BlockBackend *blk)

void blk_drain_all(void)

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
    return is_read ? blk->on_read_error : blk->on_write_error;

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;

static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
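
/*
 * Illustrative sketch (not part of this file): how a device model typically
 * reacts to a failed request.  req and my_complete_request are hypothetical,
 * and ret is assumed to be a negative errno value.
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, -ret);
 *
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ...queue req for retry after the VM is resumed...
 *     } else {
 *         my_complete_request(req, action == BLOCK_ERROR_ACTION_IGNORE
 *                                  ? 0 : ret);
 *     }
 *     blk_error_action(blk, action, is_read, -ret);
 */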

int blk_is_read_only(BlockBackend *blk)
        return bdrv_is_read_only(blk->bs);
        return blk->root_state.read_only;

int blk_is_sg(BlockBackend *blk)
    return bdrv_is_sg(blk->bs);

int blk_enable_write_cache(BlockBackend *blk)
        return bdrv_enable_write_cache(blk->bs);
        return !!(blk->root_state.open_flags & BDRV_O_CACHE_WB);

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
        bdrv_set_enable_write_cache(blk->bs, wce);
            blk->root_state.open_flags |= BDRV_O_CACHE_WB;
            blk->root_state.open_flags &= ~BDRV_O_CACHE_WB;

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
        error_setg(errp, "Device '%s' has no medium", blk->name);
    bdrv_invalidate_cache(blk->bs, errp);

bool blk_is_inserted(BlockBackend *blk)
    return blk->bs && bdrv_is_inserted(blk->bs);

bool blk_is_available(BlockBackend *blk)
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);

void blk_lock_medium(BlockBackend *blk, bool locked)
        bdrv_lock_medium(blk->bs, locked);

void blk_eject(BlockBackend *blk, bool eject_flag)
        bdrv_eject(blk->bs, eject_flag);

int blk_get_flags(BlockBackend *blk)
        return bdrv_get_flags(blk->bs);
        return blk->root_state.open_flags;

int blk_get_max_transfer_length(BlockBackend *blk)
        return blk->bs->bl.max_transfer_length;

int blk_get_max_iov(BlockBackend *blk)
    return blk->bs->bl.max_iov;

void blk_set_guest_block_size(BlockBackend *blk, int align)
    blk->guest_block_size = align;

void *blk_try_blockalign(BlockBackend *blk, size_t size)
    return qemu_try_blockalign(blk ? blk->bs : NULL, size);

void *blk_blockalign(BlockBackend *blk, size_t size)
    return qemu_blockalign(blk ? blk->bs : NULL, size);

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
    return bdrv_op_is_blocked(blk->bs, op, errp);

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
        bdrv_op_unblock(blk->bs, op, reason);

void blk_op_block_all(BlockBackend *blk, Error *reason)
        bdrv_op_block_all(blk->bs, reason);

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
        bdrv_op_unblock_all(blk->bs, reason);

AioContext *blk_get_aio_context(BlockBackend *blk)
        return bdrv_get_aio_context(blk->bs);
        return qemu_get_aio_context();

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
        bdrv_set_aio_context(blk->bs, new_context);

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
        bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                      detach_aio_context, opaque);

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                     void (*detach_aio_context)(void *),
        bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                         detach_aio_context, opaque);
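
/*
 * Illustrative sketch (not part of this file): tracking AioContext changes
 * of a backend.  my_attached_cb and my_detach_cb are hypothetical.
 *
 *     static void my_attached_cb(AioContext *new_context, void *opaque)
 *     {
 *         ...resume processing in new_context...
 *     }
 *
 *     static void my_detach_cb(void *opaque)
 *     {
 *         ...quiesce and drop references to the old context...
 *     }
 *
 *     blk_add_aio_context_notifier(blk, my_attached_cb, my_detach_cb, s);
 */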

void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
        bdrv_add_close_notifier(blk->bs, notify);

void blk_io_plug(BlockBackend *blk)
        bdrv_io_plug(blk->bs);

void blk_io_unplug(BlockBackend *blk)
        bdrv_io_unplug(blk->bs);
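
/*
 * Illustrative sketch (not part of this file): batching several requests
 * between plug and unplug so the host can merge the submissions.  The reqs
 * array and my_write_cb are hypothetical.
 *
 *     blk_io_plug(blk);
 *     for (i = 0; i < n; i++) {
 *         blk_aio_writev(blk, reqs[i].sector, &reqs[i].qiov,
 *                        reqs[i].nb_sectors, my_write_cb, &reqs[i]);
 *     }
 *     blk_io_unplug(blk);
 */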

BlockAcctStats *blk_get_stats(BlockBackend *blk)

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);

int blk_truncate(BlockBackend *blk, int64_t offset)
    if (!blk_is_available(blk)) {
    return bdrv_truncate(blk->bs, offset);

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    return bdrv_discard(blk->bs, sector_num, nb_sectors);

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
    if (!blk_is_available(blk)) {
    return bdrv_save_vmstate(blk->bs, buf, pos, size);

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
    if (!blk_is_available(blk)) {
    return bdrv_load_vmstate(blk->bs, buf, pos, size);

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
    if (!blk_is_available(blk)) {
    return bdrv_probe_blocksizes(blk->bs, bsz);

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
    if (!blk_is_available(blk)) {
    return bdrv_probe_geometry(blk->bs, geo);

/*
 * Updates the BlockBackendRootState object with data from the currently
 * attached BlockDriverState.
 */
void blk_update_root_state(BlockBackend *blk)
    blk->root_state.open_flags = blk->bs->open_flags;
    blk->root_state.read_only = blk->bs->read_only;
    blk->root_state.detect_zeroes = blk->bs->detect_zeroes;

    if (blk->root_state.throttle_group) {
        g_free(blk->root_state.throttle_group);
        throttle_group_unref(blk->root_state.throttle_state);
    if (blk->bs->throttle_state) {
        const char *name = throttle_group_get_name(blk->bs);
        blk->root_state.throttle_group = g_strdup(name);
        blk->root_state.throttle_state = throttle_group_incref(name);
    } else {
        blk->root_state.throttle_group = NULL;
        blk->root_state.throttle_state = NULL;

/*
 * Applies the information in the root state to the given BlockDriverState. This
 * does not include the flags which have to be specified for bdrv_open(), use
 * blk_get_open_flags_from_root_state() to inquire them.
 */
void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
    bs->detect_zeroes = blk->root_state.detect_zeroes;
    if (blk->root_state.throttle_group) {
        bdrv_io_limits_enable(bs, blk->root_state.throttle_group);

/*
 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
 * supposed to inherit the root state.
 */
int blk_get_open_flags_from_root_state(BlockBackend *blk)
    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;

BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
    return &blk->root_state;
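
/*
 * Illustrative sketch (not part of this file): reopening a medium while
 * keeping the backend's options.  The file name is hypothetical, error
 * handling is omitted, and the exact call order may differ in real callers.
 *
 *     blk_remove_bs(blk);          /- snapshots the root state -/
 *
 *     bs = bdrv_new_root();
 *     bdrv_open(&bs, "new-medium.qcow2", NULL, NULL,
 *               blk_get_open_flags_from_root_state(blk), &local_err);
 *     blk_apply_root_state(blk, bs);
 *     blk_insert_bs(blk, bs);
 */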