#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
+#include "sysemu/block-backend.h"
#include "sysemu/sysemu.h"
-#include "sysemu/blockdev.h" /* FIXME layering violation */
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
-#define COROUTINE_POOL_RESERVATION 64 /* number of coroutines to reserve */
-
-static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
-static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
+static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque);
-static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
+ BlockCompletionFunc *cb, void *opaque);
+static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque);
+ BlockCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
int64_t sector_num, int nb_sectors,
QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags);
-static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
- int64_t sector_num,
- QEMUIOVector *qiov,
- int nb_sectors,
- BdrvRequestFlags flags,
- BlockDriverCompletionFunc *cb,
- void *opaque,
- bool is_write);
+static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
+ int64_t sector_num,
+ QEMUIOVector *qiov,
+ int nb_sectors,
+ BdrvRequestFlags flags,
+ BlockCompletionFunc *cb,
+ void *opaque,
+ bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
static QLIST_HEAD(, BlockDriver) bdrv_drivers =
QLIST_HEAD_INITIALIZER(bdrv_drivers);
+static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
+ int nr_sectors);
+static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
+ int nr_sectors);
/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;
}
/* check if the path starts with "<protocol>:" */
-static int path_has_protocol(const char *path)
+int path_has_protocol(const char *path)
{
const char *p;
}
}
-void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
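+/* Resolve the backing file name @backing relative to the image file name
+ * @backed: protocol-prefixed and absolute names are used as-is; a relative
+ * name is combined with @backed, and an error is set if @backed is empty or
+ * a "json:" pseudo-filename that cannot anchor a relative path. */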
+void bdrv_get_full_backing_filename_from_filename(const char *backed,
+ const char *backing,
+ char *dest, size_t sz,
+ Error **errp)
{
- if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
- pstrcpy(dest, sz, bs->backing_file);
+ if (backing[0] == '\0' || path_has_protocol(backing) ||
+ path_is_absolute(backing))
+ {
+ pstrcpy(dest, sz, backing);
+ } else if (backed[0] == '\0' || strstart(backed, "json:", NULL)) {
+ error_setg(errp, "Cannot use relative backing file names for '%s'",
+ backed);
} else {
- path_combine(dest, sz, bs->filename, bs->backing_file);
+ path_combine(dest, sz, backed, backing);
}
}
+void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz,
+ Error **errp)
+{
+ char *backed = bs->exact_filename[0] ? bs->exact_filename : bs->filename;
+
+ bdrv_get_full_backing_filename_from_filename(backed, bs->backing_file,
+ dest, sz, errp);
+}
+
void bdrv_register(BlockDriver *bdrv)
{
/* Block drivers without coroutine functions need emulation */
QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
-/* create a new block device (by default it is empty) */
-BlockDriverState *bdrv_new(const char *device_name, Error **errp)
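+/* Create a new block device and add it to the list of device-level BDSes
+ * (bdrv_states); intended to be attached to a BlockBackend as its root. */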
+BlockDriverState *bdrv_new_root(void)
{
- BlockDriverState *bs;
- int i;
+ BlockDriverState *bs = bdrv_new();
- if (*device_name && !id_wellformed(device_name)) {
- error_setg(errp, "Invalid device name");
- return NULL;
- }
+ QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
+ return bs;
+}
- if (bdrv_find(device_name)) {
- error_setg(errp, "Device with id '%s' already exists",
- device_name);
- return NULL;
- }
- if (bdrv_find_node(device_name)) {
- error_setg(errp,
- "Device name '%s' conflicts with an existing node name",
- device_name);
- return NULL;
- }
+BlockDriverState *bdrv_new(void)
+{
+ BlockDriverState *bs;
+ int i;
bs = g_new0(BlockDriverState, 1);
QLIST_INIT(&bs->dirty_bitmaps);
- pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
- if (device_name[0] != '\0') {
- QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
- }
for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
QLIST_INIT(&bs->op_blockers[i]);
}
return;
}
bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
+ bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
} else {
bs->bl.opt_mem_alignment = 512;
bs->bl.opt_transfer_length =
MAX(bs->bl.opt_transfer_length,
bs->backing_hd->bl.opt_transfer_length);
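+        /* the smaller non-zero limit wins; zero means "no limit" */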
+ bs->bl.max_transfer_length =
+ MIN_NON_ZERO(bs->bl.max_transfer_length,
+ bs->backing_hd->bl.max_transfer_length);
bs->bl.opt_mem_alignment =
MAX(bs->bl.opt_mem_alignment,
bs->backing_hd->bl.opt_mem_alignment);
}
if (!path_has_protocol(filename) || !allow_protocol_prefix) {
- return bdrv_find_format("file");
+ return &bdrv_file;
}
p = strchr(filename, ':');
return NULL;
}
+/*
+ * Guess image format by probing its contents.
+ * This is not a good idea when your image is raw (CVE-2008-2004), but
+ * we do it anyway for backward compatibility.
+ *
+ * @buf contains the image's first @buf_size bytes.
+ * @buf_size is the buffer size in bytes (generally BLOCK_PROBE_BUF_SIZE,
+ * but can be smaller if the image file is smaller)
+ * @filename is its filename.
+ *
+ * For all block drivers, call the bdrv_probe() method to get its
+ * probing score.
+ * Return the first block driver with the highest probing score.
+ */
+BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
+ const char *filename)
+{
+ int score_max = 0, score;
+ BlockDriver *drv = NULL, *d;
+
+ QLIST_FOREACH(d, &bdrv_drivers, list) {
+ if (d->bdrv_probe) {
+ score = d->bdrv_probe(buf, buf_size, filename);
+ if (score > score_max) {
+ score_max = score;
+ drv = d;
+ }
+ }
+ }
+
+ return drv;
+}
+
static int find_image_format(BlockDriverState *bs, const char *filename,
BlockDriver **pdrv, Error **errp)
{
- int score, score_max;
- BlockDriver *drv1, *drv;
- uint8_t buf[2048];
+ BlockDriver *drv;
+ uint8_t buf[BLOCK_PROBE_BUF_SIZE];
int ret = 0;
/* Return the raw BlockDriver * to scsi-generic devices or empty drives */
if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
- drv = bdrv_find_format("raw");
- if (!drv) {
- error_setg(errp, "Could not find raw image format");
- ret = -ENOENT;
- }
- *pdrv = drv;
+ *pdrv = &bdrv_raw;
return ret;
}
return ret;
}
- score_max = 0;
- drv = NULL;
- QLIST_FOREACH(drv1, &bdrv_drivers, list) {
- if (drv1->bdrv_probe) {
- score = drv1->bdrv_probe(buf, ret, filename);
- if (score > score_max) {
- score_max = score;
- drv = drv1;
- }
- }
- }
+ drv = bdrv_probe_all(buf, ret, filename);
if (!drv) {
error_setg(errp, "Could not determine image format: No compatible "
"driver found");
}
/* takes care of avoiding namespace collisions */
- if (bdrv_find(node_name)) {
+ if (blk_by_name(node_name)) {
error_setg(errp, "node-name=%s is conflicting with a device id",
node_name);
return;
} else if (backing_hd) {
error_setg(&bs->backing_blocker,
"device is used as backing hd of '%s'",
- bs->device_name);
+ bdrv_get_device_name(bs));
}
bs->backing_hd = backing_hd;
bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
/* Otherwise we won't be able to commit due to check in bdrv_commit */
- bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
+ bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT_TARGET,
bs->backing_blocker);
out:
bdrv_refresh_limits(bs, NULL);
{
char *backing_filename = g_malloc0(PATH_MAX);
int ret = 0;
- BlockDriver *back_drv = NULL;
BlockDriverState *backing_hd;
Error *local_err = NULL;
QDECREF(options);
goto free_exit;
} else {
- bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
+ bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX,
+ &local_err);
+ if (local_err) {
+ ret = -EINVAL;
+ error_propagate(errp, local_err);
+ QDECREF(options);
+ goto free_exit;
+ }
}
if (!bs->drv || !bs->drv->supports_backing) {
goto free_exit;
}
- backing_hd = bdrv_new("", errp);
+ backing_hd = bdrv_new();
- if (bs->backing_format[0] != '\0') {
- back_drv = bdrv_find_format(bs->backing_format);
+ if (bs->backing_format[0] != '\0' && !qdict_haskey(options, "driver")) {
+ qdict_put(options, "driver", qstring_from_str(bs->backing_format));
}
assert(bs->backing_hd == NULL);
ret = bdrv_open(&backing_hd,
*backing_filename ? backing_filename : NULL, NULL, options,
- bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
+ bdrv_backing_flags(bs->open_flags), NULL, &local_err);
if (ret < 0) {
bdrv_unref(backing_hd);
backing_hd = NULL;
/* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
char *tmp_filename = g_malloc0(PATH_MAX + 1);
int64_t total_size;
- BlockDriver *bdrv_qcow2;
QemuOpts *opts = NULL;
QDict *snapshot_options;
BlockDriverState *bs_snapshot;
goto out;
}
- bdrv_qcow2 = bdrv_find_format("qcow2");
- opts = qemu_opts_create(bdrv_qcow2->create_opts, NULL, 0,
+ opts = qemu_opts_create(bdrv_qcow2.create_opts, NULL, 0,
&error_abort);
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
- ret = bdrv_create(bdrv_qcow2, tmp_filename, opts, &local_err);
+ ret = bdrv_create(&bdrv_qcow2, tmp_filename, opts, &local_err);
qemu_opts_del(opts);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not create temporary overlay "
qdict_put(snapshot_options, "file.filename",
qstring_from_str(tmp_filename));
- bs_snapshot = bdrv_new("", &error_abort);
+ bs_snapshot = bdrv_new();
ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
- flags, bdrv_qcow2, &local_err);
+ flags, &bdrv_qcow2, &local_err);
if (ret < 0) {
error_propagate(errp, local_err);
goto out;
if (*pbs) {
bs = *pbs;
} else {
- bs = bdrv_new("", &error_abort);
+ bs = bdrv_new();
}
/* NULL means an empty set of options */
}
/* Image format probing */
+ bs->probed = !drv;
if (!drv && file) {
ret = find_image_format(file, filename, &drv, &local_err);
if (ret < 0) {
} else {
error_setg(errp, "Block format '%s' used by device '%s' doesn't "
"support the option '%s'", drv->format_name,
- bs->device_name, entry->key);
+ bdrv_get_device_name(bs), entry->key);
}
ret = -EINVAL;
}
if (!bdrv_key_required(bs)) {
- bdrv_dev_change_media_cb(bs, true);
+ if (bs->blk) {
+ blk_dev_change_media_cb(bs->blk, true);
+ }
} else if (!runstate_check(RUN_STATE_PRELAUNCH)
&& !runstate_check(RUN_STATE_INMIGRATE)
&& !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
reopen_state->flags & BDRV_O_RDWR) {
error_set(errp, QERR_DEVICE_IS_READ_ONLY,
- reopen_state->bs->device_name);
+ bdrv_get_device_name(reopen_state->bs));
goto error;
}
/* It is currently mandatory to have a bdrv_reopen_prepare()
* handler for each supported drv. */
error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
- drv->format_name, reopen_state->bs->device_name,
+ drv->format_name, bdrv_get_device_name(reopen_state->bs),
"reopening of file");
ret = -1;
goto error;
}
}
- bdrv_dev_change_media_cb(bs, false);
+ if (bs->blk) {
+ blk_dev_change_media_cb(bs->blk, false);
+ }
/* throttling disk I/O limits */
if (bs->io_limits_enabled) {
return false;
}
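+/* Flush the I/O queue and restart throttled requests for @bs, then poll its
+ * AioContext once. Returns true while there may still be pending work, i.e.
+ * the caller should keep iterating. */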
+static bool bdrv_drain_one(BlockDriverState *bs)
+{
+ bool bs_busy;
+
+ bdrv_flush_io_queue(bs);
+ bdrv_start_throttled_reqs(bs);
+ bs_busy = bdrv_requests_pending(bs);
+ bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);
+ return bs_busy;
+}
+
+/*
+ * Wait for pending requests to complete on a single BlockDriverState subtree
+ *
+ * See the warning in bdrv_drain_all(). This function can only be called if
+ * you are sure nothing can generate I/O because you have op blockers
+ * installed.
+ *
+ * Note that unlike bdrv_drain_all(), the caller must already hold the
+ * AioContext of the BlockDriverState.
+ */
+void bdrv_drain(BlockDriverState *bs)
+{
+ while (bdrv_drain_one(bs)) {
+ /* Keep iterating */
+ }
+}
+
/*
* Wait for pending requests to complete across all BlockDriverStates
*
QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
AioContext *aio_context = bdrv_get_aio_context(bs);
- bool bs_busy;
aio_context_acquire(aio_context);
- bdrv_flush_io_queue(bs);
- bdrv_start_throttled_reqs(bs);
- bs_busy = bdrv_requests_pending(bs);
- bs_busy |= aio_poll(aio_context, bs_busy);
+ busy |= bdrv_drain_one(bs);
aio_context_release(aio_context);
-
- busy |= bs_busy;
}
}
}
-   Also, NULL terminate the device_name to prevent double remove */
+   Also, reset device_list.tqe_prev to NULL to prevent double remove */
void bdrv_make_anon(BlockDriverState *bs)
{
- if (bs->device_name[0] != '\0') {
+ /*
+ * Take care to remove bs from bdrv_states only when it's actually
+ * in it. Note that bs->device_list.tqe_prev is initially null,
+ * and gets set to non-null by QTAILQ_INSERT_TAIL(). Establish
+ * the useful invariant "bs in bdrv_states iff bs->tqe_prev" by
+ * resetting it to null on remove.
+ */
+ if (bs->device_list.tqe_prev) {
QTAILQ_REMOVE(&bdrv_states, bs, device_list);
+ bs->device_list.tqe_prev = NULL;
}
- bs->device_name[0] = '\0';
if (bs->node_name[0] != '\0') {
QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
}
/* move some fields that need to stay attached to the device */
/* dev info */
- bs_dest->dev_ops = bs_src->dev_ops;
- bs_dest->dev_opaque = bs_src->dev_opaque;
- bs_dest->dev = bs_src->dev;
bs_dest->guest_block_size = bs_src->guest_block_size;
bs_dest->copy_on_read = bs_src->copy_on_read;
bs_dest->job = bs_src->job;
/* keep the same entry in bdrv_states */
- pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
- bs_src->device_name);
bs_dest->device_list = bs_src->device_list;
+ bs_dest->blk = bs_src->blk;
+
memcpy(bs_dest->op_blockers, bs_src->op_blockers,
sizeof(bs_dest->op_blockers));
}
* This will modify the BlockDriverState fields, and swap contents
* between bs_new and bs_old. Both bs_new and bs_old are modified.
*
- * bs_new is required to be anonymous.
+ * bs_new must not be attached to a BlockBackend.
*
* This function does not create any image files.
*/
QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
}
- /* bs_new must be anonymous and shouldn't have anything fancy enabled */
- assert(bs_new->device_name[0] == '\0');
+ /* bs_new must be unattached and shouldn't have anything fancy enabled */
+ assert(!bs_new->blk);
assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
assert(bs_new->job == NULL);
- assert(bs_new->dev == NULL);
assert(bs_new->io_limits_enabled == false);
assert(!throttle_have_timer(&bs_new->throttle_state));
bdrv_move_feature_fields(bs_old, bs_new);
bdrv_move_feature_fields(bs_new, &tmp);
- /* bs_new shouldn't be in bdrv_states even after the swap! */
- assert(bs_new->device_name[0] == '\0');
+ /* bs_new must remain unattached */
+ assert(!bs_new->blk);
/* Check a few fields that should remain attached to the device */
- assert(bs_new->dev == NULL);
assert(bs_new->job == NULL);
assert(bs_new->io_limits_enabled == false);
assert(!throttle_have_timer(&bs_new->throttle_state));
* This will modify the BlockDriverState fields, and swap contents
* between bs_new and bs_top. Both bs_new and bs_top are modified.
*
- * bs_new is required to be anonymous.
+ * bs_new must not be attached to a BlockBackend.
*
* This function does not create any image files.
*/
static void bdrv_delete(BlockDriverState *bs)
{
- assert(!bs->dev);
assert(!bs->job);
assert(bdrv_op_blocker_is_empty(bs));
assert(!bs->refcnt);
/* remove from list, if necessary */
bdrv_make_anon(bs);
- drive_info_del(drive_get_by_blockdev(bs));
g_free(bs);
}
-int bdrv_attach_dev(BlockDriverState *bs, void *dev)
-/* TODO change to DeviceState *dev when all users are qdevified */
-{
- if (bs->dev) {
- return -EBUSY;
- }
- bs->dev = dev;
- bdrv_iostatus_reset(bs);
-
- /* We're expecting I/O from the device so bump up coroutine pool size */
- qemu_coroutine_adjust_pool_size(COROUTINE_POOL_RESERVATION);
- return 0;
-}
-
-/* TODO qdevified devices don't use this, remove when devices are qdevified */
-void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
-{
- if (bdrv_attach_dev(bs, dev) < 0) {
- abort();
- }
-}
-
-void bdrv_detach_dev(BlockDriverState *bs, void *dev)
-/* TODO change to DeviceState *dev when all users are qdevified */
-{
- assert(bs->dev == dev);
- bs->dev = NULL;
- bs->dev_ops = NULL;
- bs->dev_opaque = NULL;
- bs->guest_block_size = 512;
- qemu_coroutine_adjust_pool_size(-COROUTINE_POOL_RESERVATION);
-}
-
-/* TODO change to return DeviceState * when all users are qdevified */
-void *bdrv_get_attached_dev(BlockDriverState *bs)
-{
- return bs->dev;
-}
-
-void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
- void *opaque)
-{
- bs->dev_ops = ops;
- bs->dev_opaque = opaque;
-}
-
-static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
-{
- if (bs->dev_ops && bs->dev_ops->change_media_cb) {
- bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
- bs->dev_ops->change_media_cb(bs->dev_opaque, load);
- if (tray_was_closed) {
- /* tray open */
- qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
- true, &error_abort);
- }
- if (load) {
- /* tray close */
- qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
- false, &error_abort);
- }
- }
-}
-
-bool bdrv_dev_has_removable_media(BlockDriverState *bs)
-{
- return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
-}
-
-void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
-{
- if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
- bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
- }
-}
-
-bool bdrv_dev_is_tray_open(BlockDriverState *bs)
-{
- if (bs->dev_ops && bs->dev_ops->is_tray_open) {
- return bs->dev_ops->is_tray_open(bs->dev_opaque);
- }
- return false;
-}
-
-static void bdrv_dev_resize_cb(BlockDriverState *bs)
-{
- if (bs->dev_ops && bs->dev_ops->resize_cb) {
- bs->dev_ops->resize_cb(bs->dev_opaque);
- }
-}
-
-bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
-{
- if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
- return bs->dev_ops->is_medium_locked(bs->dev_opaque);
- }
- return false;
-}
-
/*
* Run consistency checks on an image
*
int n, ro, open_flags;
int ret = 0;
uint8_t *buf = NULL;
- char filename[PATH_MAX];
if (!drv)
return -ENOMEDIUM;
return -ENOTSUP;
}
- if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
- bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
+ if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) ||
+ bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT_TARGET, NULL)) {
return -EBUSY;
}
ro = bs->backing_hd->read_only;
- /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
- pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
open_flags = bs->backing_hd->open_flags;
if (ro) {
if (nb_sectors <= 0) {
return 0;
}
- if (nb_sectors > INT_MAX) {
- nb_sectors = INT_MAX;
+ if (nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
+ nb_sectors = INT_MAX / BDRV_SECTOR_SIZE;
}
ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
if (ret < 0) {
max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
align >> BDRV_SECTOR_BITS);
- if (max_nb_sectors > 0) {
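+    /* If the whole request lies before the end of the image, pass it through
+     * unchanged; otherwise clamp it to the aligned end of the image. */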
+ if (nb_sectors < max_nb_sectors) {
+ ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
+ } else if (max_nb_sectors > 0) {
QEMUIOVector local_qiov;
- size_t local_sectors;
-
- max_nb_sectors = MIN(max_nb_sectors, SIZE_MAX / BDRV_SECTOR_BITS);
- local_sectors = MIN(max_nb_sectors, nb_sectors);
qemu_iovec_init(&local_qiov, qiov->niov);
qemu_iovec_concat(&local_qiov, qiov, 0,
- local_sectors * BDRV_SECTOR_SIZE);
+ max_nb_sectors * BDRV_SECTOR_SIZE);
- ret = drv->bdrv_co_readv(bs, sector_num, local_sectors,
+ ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
&local_qiov);
qemu_iovec_destroy(&local_qiov);
BDRV_REQ_COPY_ON_READ);
}
-/* if no limit is specified in the BlockLimits use a default
- * of 32768 512-byte sectors (16 MiB) per request.
- */
-#define MAX_WRITE_ZEROES_DEFAULT 32768
+#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
int ret = 0;
int max_write_zeroes = bs->bl.max_write_zeroes ?
- bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
+ bs->bl.max_write_zeroes : INT_MAX;
while (nb_sectors > 0 && !ret) {
int num = nb_sectors;
if (ret == -ENOTSUP) {
/* Fall back to bounce buffer if write zeroes is unsupported */
+ int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
+ MAX_WRITE_ZEROES_BOUNCE_BUFFER);
+ num = MIN(num, max_xfer_len);
iov.iov_len = num * BDRV_SECTOR_SIZE;
if (iov.iov_base == NULL) {
iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
/* Keep bounce buffer around if it is big enough for all
 * future requests.
*/
- if (num < max_write_zeroes) {
+ if (num < max_xfer_len) {
qemu_vfree(iov.iov_base);
iov.iov_base = NULL;
}
ret = drv->bdrv_truncate(bs, offset);
if (ret == 0) {
ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
- bdrv_dev_resize_cb(bs);
+ if (bs->blk) {
+ blk_dev_resize_cb(bs->blk);
+ }
}
return ret;
}
BlockErrorAction action,
bool is_read, int error)
{
- BlockErrorAction ac;
+ IoOperationType optype;
- ac = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
- qapi_event_send_block_io_error(bdrv_get_device_name(bs), ac, action,
+ optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
+ qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
bdrv_iostatus_is_enabled(bs),
error == ENOSPC, strerror(error),
&error_abort);
bs->valid_key = 0;
} else if (!bs->valid_key) {
bs->valid_key = 1;
- /* call the change callback now, we skipped it on open */
- bdrv_dev_change_media_cb(bs, true);
+ if (bs->blk) {
+ /* call the change callback now, we skipped it on open */
+ blk_dev_change_media_cb(bs->blk, true);
+ }
}
return ret;
}
}
/* Find the BlockDriverState attached to the block backend with the given name */
+/* TODO convert callers to blk_by_name(), then remove */
BlockDriverState *bdrv_find(const char *name)
{
- BlockDriverState *bs;
+ BlockBackend *blk = blk_by_name(name);
- QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
- if (!strcmp(name, bs->device_name)) {
- return bs;
- }
- }
- return NULL;
+ return blk ? blk_bs(blk) : NULL;
}
/* Find a node in the graph of BlockDriverStates by its node name */
const char *node_name,
Error **errp)
{
- BlockDriverState *bs = NULL;
+ BlockBackend *blk;
+ BlockDriverState *bs;
if (device) {
- bs = bdrv_find(device);
+ blk = blk_by_name(device);
- if (bs) {
- return bs;
+ if (blk) {
+ return blk_bs(blk);
}
}
return top != NULL;
}
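+/* Iterate over the named graph nodes: pass NULL to get the first node, or a
+ * node to get its successor in graph_bdrv_states. */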
+BlockDriverState *bdrv_next_node(BlockDriverState *bs)
+{
+ if (!bs) {
+ return QTAILQ_FIRST(&graph_bdrv_states);
+ }
+ return QTAILQ_NEXT(bs, node_list);
+}
+
BlockDriverState *bdrv_next(BlockDriverState *bs)
{
if (!bs) {
return QTAILQ_NEXT(bs, device_list);
}
-void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
+const char *bdrv_get_node_name(const BlockDriverState *bs)
{
- BlockDriverState *bs;
-
- QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
- it(opaque, bs);
- }
+ return bs->node_name;
}
-const char *bdrv_get_device_name(BlockDriverState *bs)
+/* TODO check what callers really want: bs->node_name or blk_name() */
+const char *bdrv_get_device_name(const BlockDriverState *bs)
{
- return bs->device_name;
+ return bs->blk ? blk_name(bs->blk) : "";
}
int bdrv_get_flags(BlockDriverState *bs)
} BdrvCoGetBlockStatusData;
/*
- * Returns true iff the specified sector is present in the disk image. Drivers
- * not implementing the functionality are assumed to not support backing files,
- * hence all their sectors are reported as allocated.
+ * Returns the allocation status of the specified sectors.
+ * Drivers not implementing the functionality are assumed to not support
+ * backing files, hence all their sectors are reported as allocated.
*
* If 'sector_num' is beyond the end of the disk image the return value is 0
* and 'pnum' is set to 0.
if (bs->file &&
(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
(ret & BDRV_BLOCK_OFFSET_VALID)) {
+ int file_pnum;
+
ret2 = bdrv_co_get_block_status(bs->file, ret >> BDRV_SECTOR_BITS,
- *pnum, pnum);
+ *pnum, &file_pnum);
if (ret2 >= 0) {
/* Ignore errors. This is just providing extra information, it
* is useful but not necessary.
*/
- ret |= (ret2 & BDRV_BLOCK_ZERO);
+ if (!file_pnum) {
+ /* !file_pnum indicates an offset at or beyond the EOF; it is
+ * perfectly valid for the format block driver to point to such
+ * offsets, so catch it and mark everything as zero */
+ ret |= BDRV_BLOCK_ZERO;
+ } else {
+ /* Limit request to the range reported by the protocol driver */
+ *pnum = file_pnum;
+ ret |= (ret2 & BDRV_BLOCK_ZERO);
+ }
}
}
/**************************************************************/
/* async I/Os */
-BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
- QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
+BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
+ QEMUIOVector *qiov, int nb_sectors,
+ BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
cb, opaque, false);
}
-BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
- QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
+BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
+ QEMUIOVector *qiov, int nb_sectors,
+ BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
cb, opaque, true);
}
-BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
+BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
- BlockDriverCompletionFunc *cb, void *opaque)
+ BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
int num_requests;
int num_callbacks;
struct {
- BlockDriverCompletionFunc *cb;
+ BlockCompletionFunc *cb;
void *opaque;
QEMUIOVector *free_qiov;
} callbacks[];
merge = 0;
}
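+        /* Do not merge if the combined request would exceed the driver's
+         * maximum transfer length. */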
+ if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
+ reqs[i].nb_sectors > bs->bl.max_transfer_length) {
+ merge = 0;
+ }
+
if (merge) {
size_t size;
QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
}
}
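+    /* record how many write requests were merged away, for accounting */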
+ block_acct_merge_done(&bs->stats, BLOCK_ACCT_WRITE, num_reqs - outidx - 1);
+
return outidx + 1;
}
return 0;
}
-void bdrv_aio_cancel(BlockDriverAIOCB *acb)
+void bdrv_aio_cancel(BlockAIOCB *acb)
{
qemu_aio_ref(acb);
bdrv_aio_cancel_async(acb);
/* Async version of aio cancel. The caller is not blocked if the acb implements
 * cancel_async, otherwise we do nothing and let the request complete normally.
* In either case the completion callback must be called. */
-void bdrv_aio_cancel_async(BlockDriverAIOCB *acb)
+void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
if (acb->aiocb_info->cancel_async) {
acb->aiocb_info->cancel_async(acb);
/**************************************************************/
/* async block device emulation */
-typedef struct BlockDriverAIOCBSync {
- BlockDriverAIOCB common;
+typedef struct BlockAIOCBSync {
+ BlockAIOCB common;
QEMUBH *bh;
int ret;
/* vector translation state */
QEMUIOVector *qiov;
uint8_t *bounce;
int is_write;
-} BlockDriverAIOCBSync;
+} BlockAIOCBSync;
static const AIOCBInfo bdrv_em_aiocb_info = {
- .aiocb_size = sizeof(BlockDriverAIOCBSync),
+ .aiocb_size = sizeof(BlockAIOCBSync),
};
static void bdrv_aio_bh_cb(void *opaque)
{
- BlockDriverAIOCBSync *acb = opaque;
+ BlockAIOCBSync *acb = opaque;
if (!acb->is_write && acb->ret >= 0) {
qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
qemu_aio_unref(acb);
}
-static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
- int64_t sector_num,
- QEMUIOVector *qiov,
- int nb_sectors,
- BlockDriverCompletionFunc *cb,
- void *opaque,
- int is_write)
+static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
+ int64_t sector_num,
+ QEMUIOVector *qiov,
+ int nb_sectors,
+ BlockCompletionFunc *cb,
+ void *opaque,
+ int is_write)
{
- BlockDriverAIOCBSync *acb;
+ BlockAIOCBSync *acb;
acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
acb->is_write = is_write;
return &acb->common;
}
-static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
+static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
+ BlockCompletionFunc *cb, void *opaque)
{
return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}
-static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
+static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
+ BlockCompletionFunc *cb, void *opaque)
{
return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
-typedef struct BlockDriverAIOCBCoroutine {
- BlockDriverAIOCB common;
+typedef struct BlockAIOCBCoroutine {
+ BlockAIOCB common;
BlockRequest req;
bool is_write;
bool *done;
QEMUBH* bh;
-} BlockDriverAIOCBCoroutine;
+} BlockAIOCBCoroutine;
static const AIOCBInfo bdrv_em_co_aiocb_info = {
- .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
+ .aiocb_size = sizeof(BlockAIOCBCoroutine),
};
static void bdrv_co_em_bh(void *opaque)
{
- BlockDriverAIOCBCoroutine *acb = opaque;
+ BlockAIOCBCoroutine *acb = opaque;
acb->common.cb(acb->common.opaque, acb->req.error);
/* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
- BlockDriverAIOCBCoroutine *acb = opaque;
+ BlockAIOCBCoroutine *acb = opaque;
BlockDriverState *bs = acb->common.bs;
if (!acb->is_write) {
qemu_bh_schedule(acb->bh);
}
-static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
- int64_t sector_num,
- QEMUIOVector *qiov,
- int nb_sectors,
- BdrvRequestFlags flags,
- BlockDriverCompletionFunc *cb,
- void *opaque,
- bool is_write)
+static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
+ int64_t sector_num,
+ QEMUIOVector *qiov,
+ int nb_sectors,
+ BdrvRequestFlags flags,
+ BlockCompletionFunc *cb,
+ void *opaque,
+ bool is_write)
{
Coroutine *co;
- BlockDriverAIOCBCoroutine *acb;
+ BlockAIOCBCoroutine *acb;
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
acb->req.sector = sector_num;
static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
{
- BlockDriverAIOCBCoroutine *acb = opaque;
+ BlockAIOCBCoroutine *acb = opaque;
BlockDriverState *bs = acb->common.bs;
acb->req.error = bdrv_co_flush(bs);
qemu_bh_schedule(acb->bh);
}
-BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
- BlockDriverCompletionFunc *cb, void *opaque)
+BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
+ BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_flush(bs, opaque);
Coroutine *co;
- BlockDriverAIOCBCoroutine *acb;
+ BlockAIOCBCoroutine *acb;
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
{
- BlockDriverAIOCBCoroutine *acb = opaque;
+ BlockAIOCBCoroutine *acb = opaque;
BlockDriverState *bs = acb->common.bs;
acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
qemu_bh_schedule(acb->bh);
}
-BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
+BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
int64_t sector_num, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
+ BlockCompletionFunc *cb, void *opaque)
{
Coroutine *co;
- BlockDriverAIOCBCoroutine *acb;
+ BlockAIOCBCoroutine *acb;
trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
}
void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
- BlockDriverCompletionFunc *cb, void *opaque)
+ BlockCompletionFunc *cb, void *opaque)
{
- BlockDriverAIOCB *acb;
+ BlockAIOCB *acb;
acb = g_slice_alloc(aiocb_info->aiocb_size);
acb->aiocb_info = aiocb_info;
void qemu_aio_ref(void *p)
{
- BlockDriverAIOCB *acb = p;
+ BlockAIOCB *acb = p;
acb->refcnt++;
}
void qemu_aio_unref(void *p)
{
- BlockDriverAIOCB *acb = p;
+ BlockAIOCB *acb = p;
assert(acb->refcnt > 0);
if (--acb->refcnt == 0) {
g_slice_free1(acb->aiocb_info->aiocb_size, acb);
CoroutineIOCompletion co = {
.coroutine = qemu_coroutine_self(),
};
- BlockDriverAIOCB *acb;
+ BlockAIOCB *acb;
if (is_write) {
acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
if (bs->drv->bdrv_co_flush_to_disk) {
ret = bs->drv->bdrv_co_flush_to_disk(bs);
} else if (bs->drv->bdrv_aio_flush) {
- BlockDriverAIOCB *acb;
+ BlockAIOCB *acb;
CoroutineIOCompletion co = {
.coroutine = qemu_coroutine_self(),
};
rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
}
-/* if no limit is specified in the BlockLimits use a default
- * of 32768 512-byte sectors (16 MiB) per request.
- */
-#define MAX_DISCARD_DEFAULT 32768
-
int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
return 0;
}
- max_discard = bs->bl.max_discard ? bs->bl.max_discard : MAX_DISCARD_DEFAULT;
+ max_discard = bs->bl.max_discard ? bs->bl.max_discard : INT_MAX;
while (nb_sectors > 0) {
int ret;
int num = nb_sectors;
if (bs->drv->bdrv_co_discard) {
ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
} else {
- BlockDriverAIOCB *acb;
+ BlockAIOCB *acb;
CoroutineIOCompletion co = {
.coroutine = qemu_coroutine_self(),
};
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
BlockDriver *drv = bs->drv;
+ const char *device_name;
if (drv && drv->bdrv_eject) {
drv->bdrv_eject(bs, eject_flag);
}
- if (bs->device_name[0] != '\0') {
- qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
+ device_name = bdrv_get_device_name(bs);
+ if (device_name[0] != '\0') {
+ qapi_event_send_device_tray_moved(device_name,
eject_flag, &error_abort);
}
}
return -ENOTSUP;
}
-BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
+BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
unsigned long int req, void *buf,
- BlockDriverCompletionFunc *cb, void *opaque)
+ BlockCompletionFunc *cb, void *opaque)
{
BlockDriver *drv = bs->drv;
return qemu_memalign(bdrv_opt_mem_align(bs), size);
}
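+/* Allocate a zero-filled buffer aligned for I/O on @bs */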
+void *qemu_blockalign0(BlockDriverState *bs, size_t size)
+{
+ return memset(qemu_blockalign(bs, size), 0, size);
+}
+
void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
size_t align = bdrv_opt_mem_align(bs);
return qemu_try_memalign(align, size);
}
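+/* Like qemu_blockalign0(), but return NULL instead of aborting on failure */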
+void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
+{
+ void *mem = qemu_try_blockalign(bs, size);
+
+ if (mem) {
+ memset(mem, 0, size);
+ }
+
+ return mem;
+}
+
/*
* Check if all memory in this vector is sector aligned.
*/
hbitmap_iter_init(hbi, bitmap->bitmap, 0);
}
-void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
- int nr_sectors)
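+/* Mark the given sectors as dirty in one specific bitmap */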
+void bdrv_set_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
+ int64_t cur_sector, int nr_sectors)
+{
+ hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors);
+}
+
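+/* Clear the given sectors in one specific bitmap */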
+void bdrv_reset_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
+ int64_t cur_sector, int nr_sectors)
+{
+ hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors);
+}
+
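+/* Mark the given sectors as dirty in every bitmap attached to @bs */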
+static void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector,
+ int nr_sectors)
{
BdrvDirtyBitmap *bitmap;
QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
}
}
-void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors)
+static void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
+ int nr_sectors)
{
BdrvDirtyBitmap *bitmap;
QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) {
blocker = QLIST_FIRST(&bs->op_blockers[op]);
if (errp) {
error_setg(errp, "Device '%s' is busy: %s",
- bs->device_name, error_get_pretty(blocker->reason));
+ bdrv_get_device_name(bs),
+ error_get_pretty(blocker->reason));
}
return true;
}
return;
}
+ if (!drv->create_opts) {
+ error_setg(errp, "Format driver '%s' does not support image creation",
+ drv->format_name);
+ return;
+ }
+
+ if (!proto_drv->create_opts) {
+ error_setg(errp, "Protocol driver '%s' does not support image creation",
+ proto_drv->format_name);
+ return;
+ }
+
create_opts = qemu_opts_append(create_opts, drv->create_opts);
create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
if (size == -1) {
if (backing_file) {
BlockDriverState *bs;
+ char *full_backing = g_new0(char, PATH_MAX);
int64_t size;
int back_flags;
+ bdrv_get_full_backing_filename_from_filename(filename, backing_file,
+ full_backing, PATH_MAX,
+ &local_err);
+ if (local_err) {
+ g_free(full_backing);
+ goto out;
+ }
+
/* backing files always opened read-only */
back_flags =
flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
bs = NULL;
- ret = bdrv_open(&bs, backing_file, NULL, NULL, back_flags,
+ ret = bdrv_open(&bs, full_backing, NULL, NULL, back_flags,
backing_drv, &local_err);
+ g_free(full_backing);
if (ret < 0) {
- error_setg_errno(errp, -ret, "Could not open '%s': %s",
- backing_file,
- error_get_pretty(local_err));
- error_free(local_err);
- local_err = NULL;
goto out;
}
size = bdrv_getlength(bs);
}
if (!quiet) {
- printf("Formatting '%s', fmt=%s ", filename, fmt);
- qemu_opts_print(opts);
+ printf("Formatting '%s', fmt=%s", filename, fmt);
+ qemu_opts_print(opts, " ");
puts("");
}
notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
-int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts)
+int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts,
+ BlockDriverAmendStatusCB *status_cb)
{
if (!bs->drv->bdrv_amend_options) {
return -ENOTSUP;
}
- return bs->drv->bdrv_amend_options(bs, opts);
+ return bs->drv->bdrv_amend_options(bs, opts, status_cb);
}
/* This function will be called by the bdrv_recurse_is_first_non_filter method
BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
{
BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
+ AioContext *aio_context;
+
if (!to_replace_bs) {
error_setg(errp, "Node name '%s' not found", node_name);
return NULL;
}
+ aio_context = bdrv_get_aio_context(to_replace_bs);
+ aio_context_acquire(aio_context);
+
if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
- return NULL;
+ to_replace_bs = NULL;
+ goto out;
}
/* We don't want an arbitrary node of the BDS chain to be replaced, only the
 * top-most non-filter node.
 */
if (!bdrv_is_first_non_filter(to_replace_bs)) {
error_setg(errp, "Only top most non filter can be replaced");
- return NULL;
+ to_replace_bs = NULL;
+ goto out;
}
+out:
+ aio_context_release(aio_context);
return to_replace_bs;
}