#include "sysemu/block-backend.h"
#include "block/block_int.h"
+#include "block/blockjob.h"
+#include "block/throttle-groups.h"
#include "sysemu/blockdev.h"
+#include "sysemu/sysemu.h"
#include "qapi-event.h"
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64
+static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
+
struct BlockBackend {
char *name;
int refcnt;
/* TODO change to DeviceState when all users are qdevified */
const BlockDevOps *dev_ops;
void *dev_opaque;
+
+ /* the block size for which the guest device expects atomicity */
+ int guest_block_size;
+
+ /* If the BDS tree is removed, some of its options are stored here (which
+ * can be used to restore those options in the new BDS on insert) */
+ BlockBackendRootState root_state;
+
+ /* I/O stats (display with "info blockstats"). */
+ BlockAcctStats stats;
+
+ BlockdevOnError on_read_error, on_write_error;
+ bool iostatus_enabled;
+ BlockDeviceIoStatus iostatus;
};
typedef struct BlockBackendAIOCB {
BlockAIOCB common;
QEMUBH *bh;
+ BlockBackend *blk;
int ret;
} BlockBackendAIOCB;
static const AIOCBInfo block_backend_aiocb_info = {
+ .get_aio_context = blk_aiocb_get_aio_context,
.aiocb_size = sizeof(BlockBackendAIOCB),
};
return NULL;
}
- ret = bdrv_open(&blk->bs, filename, reference, options, flags, NULL, errp);
+ ret = bdrv_open(&blk->bs, filename, reference, options, flags, errp);
if (ret < 0) {
blk_unref(blk);
return NULL;
bdrv_unref(blk->bs);
blk->bs = NULL;
}
+ if (blk->root_state.throttle_state) {
+ g_free(blk->root_state.throttle_group);
+ throttle_group_unref(blk->root_state.throttle_state);
+ }
/* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
if (blk->name[0]) {
QTAILQ_REMOVE(&blk_backends, blk, link);
}
g_free(blk->name);
drive_info_del(blk->legacy_dinfo);
+ block_acct_cleanup(&blk->stats);
g_free(blk);
}
g_free(dinfo);
}
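+
+/* Return @blk's reference count; a null @blk is tolerated and reports 0. */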
+int blk_get_refcnt(BlockBackend *blk)
+{
+ return blk ? blk->refcnt : 0;
+}
+
/*
* Increment @blk's reference count.
* @blk must not be null.
return blk->bs;
}
+/*
+ * Changes the BlockDriverState attached to @blk
+ */
+void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
+{
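+ /* Reference the new bs before dropping the old one, so this stays safe
+ * even if @bs is the BDS that is currently attached. */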
+ bdrv_ref(bs);
+
+ if (blk->bs) {
+ blk->bs->blk = NULL;
+ bdrv_unref(blk->bs);
+ }
+ assert(bs->blk == NULL);
+
+ blk->bs = bs;
+ bs->blk = blk;
+}
+
/*
* Return @blk's DriveInfo if any, else null.
*/
}
}
+/*
+ * Disassociates the currently associated BlockDriverState from @blk.
+ */
+void blk_remove_bs(BlockBackend *blk)
+{
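+ /* Snapshot the options of the outgoing BDS into blk->root_state so they
+ * can be restored when a new BDS is inserted. */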
+ blk_update_root_state(blk);
+
+ blk->bs->blk = NULL;
+ bdrv_unref(blk->bs);
+ blk->bs = NULL;
+}
+
+/*
+ * Associates a new BlockDriverState with @blk.
+ */
+void blk_insert_bs(BlockBackend *blk, BlockDriverState *bs)
+{
+ assert(!blk->bs && !bs->blk);
+ bdrv_ref(bs);
+ blk->bs = bs;
+ bs->blk = blk;
+}
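+
+/* A possible medium-change sequence built from these helpers (sketch
+ * only; the caller code here is hypothetical):
+ *
+ * blk_remove_bs(blk); // detach old BDS, snapshot options
+ * blk_insert_bs(blk, new_bs); // attach the replacement BDS
+ * blk_apply_root_state(blk, new_bs); // reapply detect-zeroes/throttling
+ */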
+
/*
* Attach device model @dev to @blk.
* Return 0 on success, -EBUSY when a device model is attached already.
}
blk_ref(blk);
blk->dev = dev;
- bdrv_iostatus_reset(blk->bs);
+ blk_iostatus_reset(blk);
return 0;
}
blk->dev = NULL;
blk->dev_ops = NULL;
blk->dev_opaque = NULL;
- bdrv_set_guest_block_size(blk->bs, 512);
+ blk->guest_block_size = 512;
blk_unref(blk);
}
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
if (blk->dev_ops && blk->dev_ops->change_media_cb) {
- bool tray_was_closed = !blk_dev_is_tray_open(blk);
+ bool tray_was_open, tray_is_open;
+ tray_was_open = blk_dev_is_tray_open(blk);
blk->dev_ops->change_media_cb(blk->dev_opaque, load);
- if (tray_was_closed) {
- /* tray open */
- qapi_event_send_device_tray_moved(blk_name(blk),
- true, &error_abort);
- }
- if (load) {
- /* tray close */
- qapi_event_send_device_tray_moved(blk_name(blk),
- false, &error_abort);
+ tray_is_open = blk_dev_is_tray_open(blk);
+
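+ /* Emit DEVICE_TRAY_MOVED only when the callback actually toggled the
+ * tray, not on every medium change. */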
+ if (tray_was_open != tray_is_open) {
+ qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
+ &error_abort);
}
}
}
void blk_iostatus_enable(BlockBackend *blk)
{
- bdrv_iostatus_enable(blk->bs);
+ blk->iostatus_enabled = true;
+ blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
+}
+
+/* The I/O status is only enabled if the drive explicitly
+ * enables it _and_ the VM is configured to stop on errors */
+bool blk_iostatus_is_enabled(const BlockBackend *blk)
+{
+ return (blk->iostatus_enabled &&
+ (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
+ blk->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
+ blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
+}
+
+BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
+{
+ return blk->iostatus;
+}
+
+void blk_iostatus_disable(BlockBackend *blk)
+{
+ blk->iostatus_enabled = false;
+}
+
+void blk_iostatus_reset(BlockBackend *blk)
+{
+ if (blk_iostatus_is_enabled(blk)) {
+ blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
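+ /* A block job running on this BDS tracks its own iostatus; reset it
+ * too so "info block" and the job status stay consistent. */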
+ if (blk->bs && blk->bs->job) {
+ block_job_iostatus_reset(blk->bs->job);
+ }
+ }
+}
+
+void blk_iostatus_set_err(BlockBackend *blk, int error)
+{
+ assert(blk_iostatus_is_enabled(blk));
+ if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
+ blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
+ BLOCK_DEVICE_IO_STATUS_FAILED;
+ }
}
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
return -EIO;
}
- if (!blk_is_inserted(blk)) {
+ if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}
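+
+/* Synchronously write zeroes to a sector range, after the usual request
+ * bounds check; @flags (e.g. BDRV_REQ_MAY_UNMAP) are passed through. */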
+int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
+ int nb_sectors, BdrvRequestFlags flags)
+{
+ int ret = blk_check_request(blk, sector_num, nb_sectors);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
+}
+
static void error_callback_bh(void *opaque)
{
struct BlockBackendAIOCB *acb = opaque;
qemu_aio_unref(acb);
}
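+
+/* Complete an AIO request with error @ret without touching the BDS; the
+ * callback runs from a bottom half in @blk's AioContext so completion
+ * remains asynchronous, as callers expect. */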
-static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
- void *opaque, int ret)
+BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
+ BlockCompletionFunc *cb,
+ void *opaque, int ret)
{
struct BlockBackendAIOCB *acb;
QEMUBH *bh;
acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
+ acb->blk = blk;
acb->ret = ret;
bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
{
int ret = blk_check_request(blk, sector_num, nb_sectors);
if (ret < 0) {
- return abort_aio_request(blk, cb, opaque, ret);
+ return blk_abort_aio_request(blk, cb, opaque, ret);
}
return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
int64_t blk_getlength(BlockBackend *blk)
{
+ if (!blk_is_available(blk)) {
+ return -ENOMEDIUM;
+ }
+
return bdrv_getlength(blk->bs);
}
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
- bdrv_get_geometry(blk->bs, nb_sectors_ptr);
+ if (!blk->bs) {
+ *nb_sectors_ptr = 0;
+ } else {
+ bdrv_get_geometry(blk->bs, nb_sectors_ptr);
+ }
}
int64_t blk_nb_sectors(BlockBackend *blk)
{
+ if (!blk_is_available(blk)) {
+ return -ENOMEDIUM;
+ }
+
return bdrv_nb_sectors(blk->bs);
}
{
int ret = blk_check_request(blk, sector_num, nb_sectors);
if (ret < 0) {
- return abort_aio_request(blk, cb, opaque, ret);
+ return blk_abort_aio_request(blk, cb, opaque, ret);
}
return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
{
int ret = blk_check_request(blk, sector_num, nb_sectors);
if (ret < 0) {
- return abort_aio_request(blk, cb, opaque, ret);
+ return blk_abort_aio_request(blk, cb, opaque, ret);
}
return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque)
{
+ if (!blk_is_available(blk)) {
+ return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
+ }
+
return bdrv_aio_flush(blk->bs, cb, opaque);
}
{
int ret = blk_check_request(blk, sector_num, nb_sectors);
if (ret < 0) {
- return abort_aio_request(blk, cb, opaque, ret);
+ return blk_abort_aio_request(blk, cb, opaque, ret);
}
return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
+ if (!blk_is_available(blk)) {
+ return -ENOMEDIUM;
+ }
+
return bdrv_ioctl(blk->bs, req, buf);
}
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
BlockCompletionFunc *cb, void *opaque)
{
+ if (!blk_is_available(blk)) {
+ return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
+ }
+
return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}
int blk_co_flush(BlockBackend *blk)
{
+ if (!blk_is_available(blk)) {
+ return -ENOMEDIUM;
+ }
+
return bdrv_co_flush(blk->bs);
}
int blk_flush(BlockBackend *blk)
{
+ if (!blk_is_available(blk)) {
+ return -ENOMEDIUM;
+ }
+
return bdrv_flush(blk->bs);
}
return bdrv_flush_all();
}
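+
+/* Wait for all in-flight requests on the attached BDS, if any, to
+ * complete. */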
+void blk_drain(BlockBackend *blk)
+{
+ if (blk->bs) {
+ bdrv_drain(blk->bs);
+ }
+}
+
void blk_drain_all(void)
{
bdrv_drain_all();
}
+void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
+ BlockdevOnError on_write_error)
+{
+ blk->on_read_error = on_read_error;
+ blk->on_write_error = on_write_error;
+}
+
BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
- return bdrv_get_on_error(blk->bs, is_read);
+ return is_read ? blk->on_read_error : blk->on_write_error;
}
BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
int error)
{
- return bdrv_get_error_action(blk->bs, is_read, error);
+ BlockdevOnError on_err = blk_get_on_error(blk, is_read);
+
+ switch (on_err) {
+ case BLOCKDEV_ON_ERROR_ENOSPC:
+ return (error == ENOSPC) ?
+ BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
+ case BLOCKDEV_ON_ERROR_STOP:
+ return BLOCK_ERROR_ACTION_STOP;
+ case BLOCKDEV_ON_ERROR_REPORT:
+ return BLOCK_ERROR_ACTION_REPORT;
+ case BLOCKDEV_ON_ERROR_IGNORE:
+ return BLOCK_ERROR_ACTION_IGNORE;
+ default:
+ abort();
+ }
+}
+
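+/* Emit the QMP BLOCK_IO_ERROR event for @blk with the operation type, the
+ * action taken, and the error details. */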
+static void send_qmp_error_event(BlockBackend *blk,
+ BlockErrorAction action,
+ bool is_read, int error)
+{
+ IoOperationType optype;
+
+ optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
+ qapi_event_send_block_io_error(blk_name(blk), optype, action,
+ blk_iostatus_is_enabled(blk),
+ error == ENOSPC, strerror(error),
+ &error_abort);
}
+/* This is done by device models because, while the block layer knows
+ * about the error, it does not know whether an operation comes from
+ * the device or the block layer (from a job, for example).
+ */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
bool is_read, int error)
{
- bdrv_error_action(blk->bs, action, is_read, error);
+ assert(error >= 0);
+
+ if (action == BLOCK_ERROR_ACTION_STOP) {
+ /* First set the iostatus, so that "info block" returns an iostatus
+ * that matches the events raised so far (an additional error iostatus
+ * is fine, but not a lost one).
+ */
+ blk_iostatus_set_err(blk, error);
+
+ /* Then raise the request to stop the VM and the event.
+ * qemu_system_vmstop_request_prepare has two effects. First,
+ * it ensures that the STOP event always comes after the
+ * BLOCK_IO_ERROR event. Second, it ensures that even if management
+ * can observe the STOP event and do a "cont" before the STOP
+ * event is issued, the VM will not stop. In this case, vm_start()
+ * also ensures that the STOP/RESUME pair of events is emitted.
+ */
+ qemu_system_vmstop_request_prepare();
+ send_qmp_error_event(blk, action, is_read, error);
+ qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
+ } else {
+ send_qmp_error_event(blk, action, is_read, error);
+ }
}
int blk_is_read_only(BlockBackend *blk)
{
- return bdrv_is_read_only(blk->bs);
+ if (blk->bs) {
+ return bdrv_is_read_only(blk->bs);
+ } else {
+ return blk->root_state.read_only;
+ }
}
int blk_is_sg(BlockBackend *blk)
{
+ if (!blk->bs) {
+ return 0;
+ }
+
return bdrv_is_sg(blk->bs);
}
int blk_enable_write_cache(BlockBackend *blk)
{
- return bdrv_enable_write_cache(blk->bs);
+ if (blk->bs) {
+ return bdrv_enable_write_cache(blk->bs);
+ } else {
+ return !!(blk->root_state.open_flags & BDRV_O_CACHE_WB);
+ }
}
void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
- bdrv_set_enable_write_cache(blk->bs, wce);
+ if (blk->bs) {
+ bdrv_set_enable_write_cache(blk->bs, wce);
+ } else {
+ if (wce) {
+ blk->root_state.open_flags |= BDRV_O_CACHE_WB;
+ } else {
+ blk->root_state.open_flags &= ~BDRV_O_CACHE_WB;
+ }
+ }
}
void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
+ if (!blk->bs) {
+ error_setg(errp, "Device '%s' has no medium", blk->name);
+ return;
+ }
+
bdrv_invalidate_cache(blk->bs, errp);
}
-int blk_is_inserted(BlockBackend *blk)
+bool blk_is_inserted(BlockBackend *blk)
+{
+ return blk->bs && bdrv_is_inserted(blk->bs);
+}
+
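+/* True only if a medium is inserted and the tray, if the device has one,
+ * is closed; i.e. the guest can actually access the medium. */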
+bool blk_is_available(BlockBackend *blk)
{
- return bdrv_is_inserted(blk->bs);
+ return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}
void blk_lock_medium(BlockBackend *blk, bool locked)
{
- bdrv_lock_medium(blk->bs, locked);
+ if (blk->bs) {
+ bdrv_lock_medium(blk->bs, locked);
+ }
}
void blk_eject(BlockBackend *blk, bool eject_flag)
{
- bdrv_eject(blk->bs, eject_flag);
+ if (blk->bs) {
+ bdrv_eject(blk->bs, eject_flag);
+ }
}
int blk_get_flags(BlockBackend *blk)
{
- return bdrv_get_flags(blk->bs);
+ if (blk->bs) {
+ return bdrv_get_flags(blk->bs);
+ } else {
+ return blk->root_state.open_flags;
+ }
}
int blk_get_max_transfer_length(BlockBackend *blk)
{
- return blk->bs->bl.max_transfer_length;
+ if (blk->bs) {
+ return blk->bs->bl.max_transfer_length;
+ } else {
+ return 0;
+ }
}
void blk_set_guest_block_size(BlockBackend *blk, int align)
{
- bdrv_set_guest_block_size(blk->bs, align);
+ blk->guest_block_size = align;
}
void *blk_blockalign(BlockBackend *blk, size_t size)
bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
+ if (!blk->bs) {
+ return false;
+ }
+
return bdrv_op_is_blocked(blk->bs, op, errp);
}
void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
- bdrv_op_unblock(blk->bs, op, reason);
+ if (blk->bs) {
+ bdrv_op_unblock(blk->bs, op, reason);
+ }
}
void blk_op_block_all(BlockBackend *blk, Error *reason)
{
- bdrv_op_block_all(blk->bs, reason);
+ if (blk->bs) {
+ bdrv_op_block_all(blk->bs, reason);
+ }
}
void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
- bdrv_op_unblock_all(blk->bs, reason);
+ if (blk->bs) {
+ bdrv_op_unblock_all(blk->bs, reason);
+ }
}
AioContext *blk_get_aio_context(BlockBackend *blk)
{
- return bdrv_get_aio_context(blk->bs);
+ if (blk->bs) {
+ return bdrv_get_aio_context(blk->bs);
+ } else {
+ return qemu_get_aio_context();
+ }
+}
+
+static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
+{
+ BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
+ return blk_get_aio_context(blk_acb->blk);
}
void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
- bdrv_set_aio_context(blk->bs, new_context);
+ if (blk->bs) {
+ bdrv_set_aio_context(blk->bs, new_context);
+ }
}
void blk_add_aio_context_notifier(BlockBackend *blk,
void (*attached_aio_context)(AioContext *new_context, void *opaque),
void (*detach_aio_context)(void *opaque), void *opaque)
{
- bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
- detach_aio_context, opaque);
+ if (blk->bs) {
+ bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
+ detach_aio_context, opaque);
+ }
}
void blk_remove_aio_context_notifier(BlockBackend *blk,
void (*attached_aio_context)(AioContext *, void *),
void (*detach_aio_context)(void *),
void *opaque)
{
- bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
- detach_aio_context, opaque);
+ if (blk->bs) {
+ bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
+ detach_aio_context, opaque);
+ }
}
void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
{
- bdrv_add_close_notifier(blk->bs, notify);
+ if (blk->bs) {
+ bdrv_add_close_notifier(blk->bs, notify);
+ }
}
void blk_io_plug(BlockBackend *blk)
{
- bdrv_io_plug(blk->bs);
+ if (blk->bs) {
+ bdrv_io_plug(blk->bs);
+ }
}
void blk_io_unplug(BlockBackend *blk)
{
- bdrv_io_unplug(blk->bs);
+ if (blk->bs) {
+ bdrv_io_unplug(blk->bs);
+ }
}
BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
- return bdrv_get_stats(blk->bs);
+ return &blk->stats;
}
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
int blk_truncate(BlockBackend *blk, int64_t offset)
{
+ if (!blk_is_available(blk)) {
+ return -ENOMEDIUM;
+ }
+
return bdrv_truncate(blk->bs, offset);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
int64_t pos, int size)
{
+ if (!blk_is_available(blk)) {
+ return -ENOMEDIUM;
+ }
+
return bdrv_save_vmstate(blk->bs, buf, pos, size);
}
int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
+ if (!blk_is_available(blk)) {
+ return -ENOMEDIUM;
+ }
+
return bdrv_load_vmstate(blk->bs, buf, pos, size);
}
+
+int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
+{
+ if (!blk_is_available(blk)) {
+ return -ENOMEDIUM;
+ }
+
+ return bdrv_probe_blocksizes(blk->bs, bsz);
+}
+
+int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
+{
+ if (!blk_is_available(blk)) {
+ return -ENOMEDIUM;
+ }
+
+ return bdrv_probe_geometry(blk->bs, geo);
+}
+
+/*
+ * Updates the BlockBackendRootState object with data from the currently
+ * attached BlockDriverState.
+ */
+void blk_update_root_state(BlockBackend *blk)
+{
+ assert(blk->bs);
+
+ blk->root_state.open_flags = blk->bs->open_flags;
+ blk->root_state.read_only = blk->bs->read_only;
+ blk->root_state.detect_zeroes = blk->bs->detect_zeroes;
+
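+ /* Release the previously saved throttle group reference, if any, before
+ * storing the new one, keeping refcounts balanced. */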
+ if (blk->root_state.throttle_group) {
+ g_free(blk->root_state.throttle_group);
+ throttle_group_unref(blk->root_state.throttle_state);
+ }
+ if (blk->bs->throttle_state) {
+ const char *name = throttle_group_get_name(blk->bs);
+ blk->root_state.throttle_group = g_strdup(name);
+ blk->root_state.throttle_state = throttle_group_incref(name);
+ } else {
+ blk->root_state.throttle_group = NULL;
+ blk->root_state.throttle_state = NULL;
+ }
+}
+
+/*
+ * Applies the information in the root state to the given BlockDriverState. This
+ * does not include the flags which have to be specified for bdrv_open();
+ * use blk_get_open_flags_from_root_state() to inquire them.
+ */
+void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
+{
+ bs->detect_zeroes = blk->root_state.detect_zeroes;
+ if (blk->root_state.throttle_group) {
+ bdrv_io_limits_enable(bs, blk->root_state.throttle_group);
+ }
+}
+
+/*
+ * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
+ * supposed to inherit the root state.
+ */
+int blk_get_open_flags_from_root_state(BlockBackend *blk)
+{
+ int bs_flags;
+
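+ /* Read-only state is tracked separately in the root state, so recompute
+ * BDRV_O_RDWR from it and merge in the remaining saved flags. */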
+ bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
+ bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;
+
+ return bs_flags;
+}
+
+BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
+{
+ return &blk->root_state;
+}