#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
+typedef enum {
+ BDRV_REQ_COPY_ON_READ = 0x1,
+ BDRV_REQ_ZERO_WRITE = 0x2,
+} BdrvRequestFlags;
+
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+ BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
        int64_t sector_num,
        QEMUIOVector *qiov,
        int nb_sectors,
        BlockDriverCompletionFunc *cb,
        void *opaque,
        bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
+static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors);
static bool bdrv_exceed_bps_limits(BlockDriverState *bs, int nb_sectors,
bool is_write, double elapsed_time, uint64_t *wait);
void bdrv_close(BlockDriverState *bs)
{
+ bdrv_flush(bs);
if (bs->drv) {
+ if (bs->job) {
+ block_job_cancel_sync(bs->job);
+ }
+ bdrv_drain_all();
+
if (bs == bs_snapshots) {
bs_snapshots = NULL;
}
bs->device_name[0] = '\0';
}
+/*
+ * Add new bs contents at the top of an image chain while the chain is
+ * live, keeping required fields on the top layer.
+ *
+ * This will modify the BlockDriverState fields, and swap contents
+ * between bs_new and bs_top. Both bs_new and bs_top are modified.
+ *
+ * bs_new is required to be anonymous.
+ *
+ * This function does not create any image files.
+ */
+void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
+{
+ BlockDriverState tmp;
+
+ /* bs_new must be anonymous */
+ assert(bs_new->device_name[0] == '\0');
+
+ tmp = *bs_new;
+
+ /* there are some fields that need to stay on the top layer: */
+
+ /* dev info */
+ tmp.dev_ops = bs_top->dev_ops;
+ tmp.dev_opaque = bs_top->dev_opaque;
+ tmp.dev = bs_top->dev;
+ tmp.buffer_alignment = bs_top->buffer_alignment;
+ tmp.copy_on_read = bs_top->copy_on_read;
+
+ /* i/o timing parameters */
+ tmp.slice_time = bs_top->slice_time;
+ tmp.slice_start = bs_top->slice_start;
+ tmp.slice_end = bs_top->slice_end;
+ tmp.io_limits = bs_top->io_limits;
+ tmp.io_base = bs_top->io_base;
+ tmp.throttled_reqs = bs_top->throttled_reqs;
+ tmp.block_timer = bs_top->block_timer;
+ tmp.io_limits_enabled = bs_top->io_limits_enabled;
+
+ /* geometry */
+ tmp.cyls = bs_top->cyls;
+ tmp.heads = bs_top->heads;
+ tmp.secs = bs_top->secs;
+ tmp.translation = bs_top->translation;
+
+ /* r/w error */
+ tmp.on_read_error = bs_top->on_read_error;
+ tmp.on_write_error = bs_top->on_write_error;
+
+ /* i/o status */
+ tmp.iostatus_enabled = bs_top->iostatus_enabled;
+ tmp.iostatus = bs_top->iostatus;
+
+ /* keep the same entry in bdrv_states */
+ pstrcpy(tmp.device_name, sizeof(tmp.device_name), bs_top->device_name);
+ tmp.list = bs_top->list;
+
+ /* The contents of 'tmp' will become bs_top, as we are
+ * swapping bs_new and bs_top contents. */
+ tmp.backing_hd = bs_new;
+ pstrcpy(tmp.backing_file, sizeof(tmp.backing_file), bs_top->filename);
+ bdrv_get_format(bs_top, tmp.backing_format, sizeof(tmp.backing_format));
+
+ /* swap contents of the fixed new bs and the current top */
+ *bs_new = *bs_top;
+ *bs_top = tmp;
+
+ /* device_name[] was carried over from the old bs_top. bs_new
+ * shouldn't be in bdrv_states, so we need to make device_name[]
+ * reflect the anonymity of bs_new
+ */
+ bs_new->device_name[0] = '\0';
+
+ /* clear the copied fields in the new backing file */
+ bdrv_detach_dev(bs_new, bs_new->dev);
+
+ qemu_co_queue_init(&bs_new->throttled_reqs);
+ memset(&bs_new->io_base, 0, sizeof(bs_new->io_base));
+ memset(&bs_new->io_limits, 0, sizeof(bs_new->io_limits));
+ bdrv_iostatus_disable(bs_new);
+
+ /* we don't use bdrv_io_limits_disable() for this, because we don't want
+ * to affect or delete the block_timer, as it has been moved to bs_top */
+ bs_new->io_limits_enabled = false;
+ bs_new->block_timer = NULL;
+ bs_new->slice_time = 0;
+ bs_new->slice_start = 0;
+ bs_new->slice_end = 0;
+}
+
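/* Illustrative sketch, not part of the patch: how a live-snapshot code path
 * might use bdrv_append().  The filename, the "qcow2" format and the open
 * flags below are assumptions for the example only.
 */
static int example_append_snapshot(BlockDriverState *bs_top,
                                   const char *snapshot_file)
{
    BlockDriverState *bs_new;
    int ret;

    bs_new = bdrv_new("");      /* anonymous, as bdrv_append() requires */
    ret = bdrv_open(bs_new, snapshot_file, BDRV_O_RDWR | BDRV_O_NO_BACKING,
                    bdrv_find_format("qcow2"));
    if (ret < 0) {
        bdrv_delete(bs_new);
        return ret;
    }

    /* bs_top keeps its device name, I/O limits etc.; after the swap, the
     * state now reachable through bs_new is the backing file underneath it. */
    bdrv_append(bs_new, bs_top);
    return 0;
}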
void bdrv_delete(BlockDriverState *bs)
{
assert(!bs->dev);
+ assert(!bs->job);
+ assert(!bs->in_use);
/* remove from list, if necessary */
bdrv_make_anon(bs);
}
}
+void bdrv_emit_qmp_error_event(const BlockDriverState *bdrv,
+ BlockQMPEventAction action, int is_read)
+{
+ QObject *data;
+ const char *action_str;
+
+ switch (action) {
+ case BDRV_ACTION_REPORT:
+ action_str = "report";
+ break;
+ case BDRV_ACTION_IGNORE:
+ action_str = "ignore";
+ break;
+ case BDRV_ACTION_STOP:
+ action_str = "stop";
+ break;
+ default:
+ abort();
+ }
+
+ data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
+ bdrv->device_name,
+ action_str,
+ is_read ? "read" : "write");
+ monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);
+
+ qobject_decref(data);
+}
+
+static void bdrv_emit_qmp_eject_event(BlockDriverState *bs, bool ejected)
+{
+ QObject *data;
+
+ data = qobject_from_jsonf("{ 'device': %s, 'tray-open': %i }",
+ bdrv_get_device_name(bs), ejected);
+ monitor_protocol_event(QEVENT_DEVICE_TRAY_MOVED, data);
+
+ qobject_decref(data);
+}
+
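/* For reference (hedged example, device names assumed): a stop-on-error
 * policy would report a failed write with
 *
 *     bdrv_emit_qmp_error_event(bs, BDRV_ACTION_STOP, 0);
 *
 * which produces roughly
 *
 *     { "event": "BLOCK_IO_ERROR",
 *       "data": { "device": "ide0-hd0", "action": "stop",
 *                 "operation": "write" } }
 *
 * and bdrv_emit_qmp_eject_event(bs, true) produces roughly
 *
 *     { "event": "DEVICE_TRAY_MOVED",
 *       "data": { "device": "ide1-cd0", "tray-open": true } }
 */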
static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
{
if (bs->dev_ops && bs->dev_ops->change_media_cb) {
+ bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
bs->dev_ops->change_media_cb(bs->dev_opaque, load);
+ if (tray_was_closed) {
+ /* tray open */
+ bdrv_emit_qmp_eject_event(bs, true);
+ }
+ if (load) {
+ /* tray close */
+ bdrv_emit_qmp_eject_event(bs, false);
+ }
}
}
return ret;
}
-void bdrv_commit_all(void)
+int bdrv_commit_all(void)
{
BlockDriverState *bs;
QTAILQ_FOREACH(bs, &bdrv_states, list) {
- bdrv_commit(bs);
+ int ret = bdrv_commit(bs);
+ if (ret < 0) {
+ return ret;
+ }
}
+ return 0;
}
struct BdrvTrackedRequest {
if (!rwco->is_write) {
rwco->ret = bdrv_co_do_readv(rwco->bs, rwco->sector_num,
- rwco->nb_sectors, rwco->qiov);
+ rwco->nb_sectors, rwco->qiov, 0);
} else {
rwco->ret = bdrv_co_do_writev(rwco->bs, rwco->sector_num,
- rwco->nb_sectors, rwco->qiov);
+ rwco->nb_sectors, rwco->qiov, 0);
}
}
qemu_iovec_init_external(&qiov, &iov, 1);
+ /**
+ * In sync call context, when the vcpu is blocked, this throttling timer
+ * will not fire; so the I/O throttling function has to be disabled here
+ * if it has been enabled.
+ */
+ if (bs->io_limits_enabled) {
+ fprintf(stderr, "Disabling I/O throttling on '%s' due "
+ "to synchronous I/O.\n", bdrv_get_device_name(bs));
+ bdrv_io_limits_disable(bs);
+ }
+
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
bdrv_rw_co_entry(&rwco);
return 0;
}
-static int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    /* Perform I/O through a temporary buffer so that users who scribble over
     * the request while it is in flight cannot see partially overwritten
     * bounce buffer contents.
     */
void *bounce_buffer;
+ BlockDriver *drv = bs->drv;
struct iovec iov;
QEMUIOVector bounce_qiov;
int64_t cluster_sector_num;
round_to_clusters(bs, sector_num, nb_sectors,
&cluster_sector_num, &cluster_nb_sectors);
- trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors,
- cluster_sector_num, cluster_nb_sectors);
+ trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
+ cluster_sector_num, cluster_nb_sectors);
iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
qemu_iovec_init_external(&bounce_qiov, &iov, 1);
- ret = bs->drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
- &bounce_qiov);
+ ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
+ &bounce_qiov);
if (ret < 0) {
goto err;
}
- ret = bs->drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
+ if (drv->bdrv_co_write_zeroes &&
+ buffer_is_zero(bounce_buffer, iov.iov_len)) {
+ ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
+ cluster_nb_sectors);
+ } else {
+ ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
&bounce_qiov);
+ }
+
if (ret < 0) {
/* It might be okay to ignore write errors for guest requests. If this
* is a deliberate copy-on-read then we don't want to ignore the error.
* Handle a read request in coroutine context
*/
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+ BdrvRequestFlags flags)
{
BlockDriver *drv = bs->drv;
BdrvTrackedRequest req;
}
if (bs->copy_on_read) {
+ flags |= BDRV_REQ_COPY_ON_READ;
+ }
+ if (flags & BDRV_REQ_COPY_ON_READ) {
+ bs->copy_on_read_in_flight++;
+ }
+
+ if (bs->copy_on_read_in_flight) {
wait_for_overlapping_requests(bs, sector_num, nb_sectors);
}
tracked_request_begin(&req, bs, sector_num, nb_sectors, false);
- if (bs->copy_on_read) {
+ if (flags & BDRV_REQ_COPY_ON_READ) {
int pnum;
ret = bdrv_co_is_allocated(bs, sector_num, nb_sectors, &pnum);
}
if (!ret || pnum != nb_sectors) {
- ret = bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, qiov);
+ ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
goto out;
}
}
out:
tracked_request_end(&req);
+
+ if (flags & BDRV_REQ_COPY_ON_READ) {
+ bs->copy_on_read_in_flight--;
+ }
+
return ret;
}
{
trace_bdrv_co_readv(bs, sector_num, nb_sectors);
- return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov);
+ return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
+}
+
+int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+{
+ trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
+
+ return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
+ BDRV_REQ_COPY_ON_READ);
+}
+
+static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors)
+{
+ BlockDriver *drv = bs->drv;
+ QEMUIOVector qiov;
+ struct iovec iov;
+ int ret;
+
+ /* TODO Emulate only part of misaligned requests instead of letting block
+ * drivers return -ENOTSUP and emulate everything */
+
+ /* First try the efficient write zeroes operation */
+ if (drv->bdrv_co_write_zeroes) {
+ ret = drv->bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
+ if (ret != -ENOTSUP) {
+ return ret;
+ }
+ }
+
+ /* Fall back to bounce buffer if write zeroes is unsupported */
+ iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
+ iov.iov_base = qemu_blockalign(bs, iov.iov_len);
+ memset(iov.iov_base, 0, iov.iov_len);
+ qemu_iovec_init_external(&qiov, &iov, 1);
+
+ ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, &qiov);
+
+ qemu_vfree(iov.iov_base);
+ return ret;
}
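/* Sketch of a format driver's .bdrv_co_write_zeroes callback cooperating
 * with the fallback above: misaligned requests return -ENOTSUP so the
 * generic layer writes an explicit zero buffer instead.  The names
 * example_cluster_sectors and example_zero_clusters() are hypothetical.
 */
static int coroutine_fn example_co_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    if ((sector_num % example_cluster_sectors) ||
        (nb_sectors % example_cluster_sectors)) {
        return -ENOTSUP;    /* bdrv_co_do_write_zeroes() will emulate this */
    }
    return example_zero_clusters(bs, sector_num, nb_sectors);
}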
/*
* Handle a write request in coroutine context
*/
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
+ int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
+ BdrvRequestFlags flags)
{
BlockDriver *drv = bs->drv;
BdrvTrackedRequest req;
bdrv_io_limits_intercept(bs, true, nb_sectors);
}
- if (bs->copy_on_read) {
+ if (bs->copy_on_read_in_flight) {
wait_for_overlapping_requests(bs, sector_num, nb_sectors);
}
tracked_request_begin(&req, bs, sector_num, nb_sectors, true);
- ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
+ if (flags & BDRV_REQ_ZERO_WRITE) {
+ ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors);
+ } else {
+ ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
+ }
if (bs->dirty_bitmap) {
set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
{
trace_bdrv_co_writev(bs, sector_num, nb_sectors);
- return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov);
+ return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
+}
+
+int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
+ int64_t sector_num, int nb_sectors)
+{
+ trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors);
+
+ return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
+ BDRV_REQ_ZERO_WRITE);
}
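/* Illustrative only: zeroing a region from coroutine context with the new
 * public helper; the offset and length are arbitrary for the example.
 */
static int coroutine_fn example_zero_first_mb(BlockDriverState *bs)
{
    return bdrv_co_write_zeroes(bs, 0, 2048);   /* 2048 * 512 bytes = 1 MB */
}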
/**
struct partition *p;
uint32_t nr_sects;
uint64_t nb_sectors;
+ bool enabled;
bdrv_get_geometry(bs, &nb_sectors);
+ /**
+ * The function will be invoked during startup not only in sync I/O mode,
+ * but also in async I/O mode. So the I/O throttling function has to
+ * be disabled temporarily here, not permanently.
+ */
+ enabled = bs->io_limits_enabled;
+ bs->io_limits_enabled = false;
ret = bdrv_read(bs, 0, buf, 1);
+ bs->io_limits_enabled = enabled;
if (ret < 0)
return -1;
/* test msdos magic */
uint8_t last_sect;
uint8_t max_track;
uint8_t max_head;
+ FDriveRate rate;
} FDFormat;
static const FDFormat fd_formats[] = {
/* First entry is default format */
/* 1.44 MB 3"1/2 floppy disks */
- { FDRIVE_DRV_144, 18, 80, 1, },
- { FDRIVE_DRV_144, 20, 80, 1, },
- { FDRIVE_DRV_144, 21, 80, 1, },
- { FDRIVE_DRV_144, 21, 82, 1, },
- { FDRIVE_DRV_144, 21, 83, 1, },
- { FDRIVE_DRV_144, 22, 80, 1, },
- { FDRIVE_DRV_144, 23, 80, 1, },
- { FDRIVE_DRV_144, 24, 80, 1, },
+ { FDRIVE_DRV_144, 18, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 20, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 21, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 21, 82, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 21, 83, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 22, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 23, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 24, 80, 1, FDRIVE_RATE_500K, },
/* 2.88 MB 3"1/2 floppy disks */
- { FDRIVE_DRV_288, 36, 80, 1, },
- { FDRIVE_DRV_288, 39, 80, 1, },
- { FDRIVE_DRV_288, 40, 80, 1, },
- { FDRIVE_DRV_288, 44, 80, 1, },
- { FDRIVE_DRV_288, 48, 80, 1, },
+ { FDRIVE_DRV_288, 36, 80, 1, FDRIVE_RATE_1M, },
+ { FDRIVE_DRV_288, 39, 80, 1, FDRIVE_RATE_1M, },
+ { FDRIVE_DRV_288, 40, 80, 1, FDRIVE_RATE_1M, },
+ { FDRIVE_DRV_288, 44, 80, 1, FDRIVE_RATE_1M, },
+ { FDRIVE_DRV_288, 48, 80, 1, FDRIVE_RATE_1M, },
/* 720 kB 3"1/2 floppy disks */
- { FDRIVE_DRV_144, 9, 80, 1, },
- { FDRIVE_DRV_144, 10, 80, 1, },
- { FDRIVE_DRV_144, 10, 82, 1, },
- { FDRIVE_DRV_144, 10, 83, 1, },
- { FDRIVE_DRV_144, 13, 80, 1, },
- { FDRIVE_DRV_144, 14, 80, 1, },
+ { FDRIVE_DRV_144, 9, 80, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_144, 10, 80, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_144, 10, 82, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_144, 10, 83, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_144, 13, 80, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_144, 14, 80, 1, FDRIVE_RATE_250K, },
/* 1.2 MB 5"1/4 floppy disks */
- { FDRIVE_DRV_120, 15, 80, 1, },
- { FDRIVE_DRV_120, 18, 80, 1, },
- { FDRIVE_DRV_120, 18, 82, 1, },
- { FDRIVE_DRV_120, 18, 83, 1, },
- { FDRIVE_DRV_120, 20, 80, 1, },
+ { FDRIVE_DRV_120, 15, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_120, 18, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_120, 18, 82, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_120, 18, 83, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_120, 20, 80, 1, FDRIVE_RATE_500K, },
/* 720 kB 5"1/4 floppy disks */
- { FDRIVE_DRV_120, 9, 80, 1, },
- { FDRIVE_DRV_120, 11, 80, 1, },
+ { FDRIVE_DRV_120, 9, 80, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_120, 11, 80, 1, FDRIVE_RATE_250K, },
/* 360 kB 5"1/4 floppy disks */
- { FDRIVE_DRV_120, 9, 40, 1, },
- { FDRIVE_DRV_120, 9, 40, 0, },
- { FDRIVE_DRV_120, 10, 41, 1, },
- { FDRIVE_DRV_120, 10, 42, 1, },
+ { FDRIVE_DRV_120, 9, 40, 1, FDRIVE_RATE_300K, },
+ { FDRIVE_DRV_120, 9, 40, 0, FDRIVE_RATE_300K, },
+ { FDRIVE_DRV_120, 10, 41, 1, FDRIVE_RATE_300K, },
+ { FDRIVE_DRV_120, 10, 42, 1, FDRIVE_RATE_300K, },
/* 320 kB 5"1/4 floppy disks */
- { FDRIVE_DRV_120, 8, 40, 1, },
- { FDRIVE_DRV_120, 8, 40, 0, },
+ { FDRIVE_DRV_120, 8, 40, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_120, 8, 40, 0, FDRIVE_RATE_250K, },
/* 360 kB must match 5"1/4 better than 3"1/2... */
- { FDRIVE_DRV_144, 9, 80, 0, },
+ { FDRIVE_DRV_144, 9, 80, 0, FDRIVE_RATE_250K, },
/* end */
- { FDRIVE_DRV_NONE, -1, -1, 0, },
+ { FDRIVE_DRV_NONE, -1, -1, 0, 0, },
};
void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
int *max_track, int *last_sect,
- FDriveType drive_in, FDriveType *drive)
+ FDriveType drive_in, FDriveType *drive,
+ FDriveRate *rate)
{
const FDFormat *parse;
uint64_t nb_sectors, size;
bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);
if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {
/* User defined disk */
+ *rate = FDRIVE_RATE_500K;
} else {
bdrv_get_geometry(bs, &nb_sectors);
match = -1;
*max_track = parse->max_track;
*last_sect = parse->last_sect;
*drive = parse->drive;
+ *rate = parse->rate;
}
}
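/* Sketch of a caller updated for the new rate parameter (the variable names
 * are illustrative, not taken from hw/fdc.c):
 */
static void example_probe_floppy(BlockDriverState *bs)
{
    FDriveType type;
    FDriveRate rate;
    int heads, tracks, last_sect;

    bdrv_get_floppy_geometry_hint(bs, &heads, &tracks, &last_sect,
                                  FDRIVE_DRV_NONE, &type, &rate);
    /* 'rate' now holds the data rate matching the detected media format */
}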
BlockDriverState *bs;
QTAILQ_FOREACH(bs, &bdrv_states, list) {
- if (!bdrv_is_read_only(bs) && bdrv_is_inserted(bs)) {
- bdrv_flush(bs);
- }
+ bdrv_flush(bs);
}
}
return data.ret;
}
-void bdrv_mon_event(const BlockDriverState *bdrv,
- BlockMonEventAction action, int is_read)
-{
- QObject *data;
- const char *action_str;
-
- switch (action) {
- case BDRV_ACTION_REPORT:
- action_str = "report";
- break;
- case BDRV_ACTION_IGNORE:
- action_str = "ignore";
- break;
- case BDRV_ACTION_STOP:
- action_str = "stop";
- break;
- default:
- abort();
- }
-
- data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
- bdrv->device_name,
- action_str,
- is_read ? "read" : "write");
- monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);
-
- qobject_decref(data);
-}
-
BlockInfoList *qmp_query_block(Error **errp)
{
BlockInfoList *head = NULL, *cur_item = NULL;
return -ENOTSUP;
}
+BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
+ const char *backing_file)
+{
+ if (!bs->drv) {
+ return NULL;
+ }
+
+ if (bs->backing_hd) {
+ if (strcmp(bs->backing_file, backing_file) == 0) {
+ return bs->backing_hd;
+ } else {
+ return bdrv_find_backing_image(bs->backing_hd, backing_file);
+ }
+ }
+
+ return NULL;
+}
+
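/* Illustrative only: with a chain opened as base.img <- mid.img <- top.img
 * (filenames assumed), the helper walks the backing_hd links, so
 *
 *     BlockDriverState *mid = bdrv_find_backing_image(top_bs, "mid.img");
 *
 * yields the intermediate BlockDriverState, or NULL if nothing in the chain
 * was opened from that exact backing_file string.
 */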
#define NB_SUFFIXES 4
char *get_human_readable_size(char *buf, int buf_size, int64_t size)
BlockDriverCompletionFunc *cb;
void *opaque;
QEMUIOVector *free_qiov;
- void *free_buf;
} callbacks[];
} MultiwriteCB;
qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
}
g_free(mcb->callbacks[i].free_qiov);
- qemu_vfree(mcb->callbacks[i].free_buf);
}
}
int merge = 0;
int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
- // This handles the cases that are valid for all block drivers, namely
- // exactly sequential writes and overlapping writes.
+ // Handle exactly sequential writes and overlapping writes.
if (reqs[i].sector <= oldreq_last) {
merge = 1;
}
- // The block driver may decide that it makes sense to combine requests
- // even if there is a gap of some sectors between them. In this case,
- // the gap is filled with zeros (therefore only applicable for yet
- // unused space in format like qcow2).
- if (!merge && bs->drv->bdrv_merge_requests) {
- merge = bs->drv->bdrv_merge_requests(bs, &reqs[outidx], &reqs[i]);
- }
-
if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
merge = 0;
}
size = (reqs[i].sector - reqs[outidx].sector) << 9;
qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
- // We might need to add some zeros between the two requests
- if (reqs[i].sector > oldreq_last) {
- size_t zero_bytes = (reqs[i].sector - oldreq_last) << 9;
- uint8_t *buf = qemu_blockalign(bs, zero_bytes);
- memset(buf, 0, zero_bytes);
- qemu_iovec_add(qiov, buf, zero_bytes);
- mcb->callbacks[i].free_buf = buf;
- }
+ // We shouldn't need to add any zeros between the two requests
+ assert(reqs[i].sector <= oldreq_last);
// Add the second request
qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
if (!acb->is_write) {
acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
- acb->req.nb_sectors, acb->req.qiov);
+ acb->req.nb_sectors, acb->req.qiov, 0);
} else {
acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
- acb->req.nb_sectors, acb->req.qiov);
+ acb->req.nb_sectors, acb->req.qiov, 0);
}
acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
{
int ret;
- if (!bs->drv) {
+ if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
return 0;
}
}
if (bs->drv->bdrv_co_flush_to_disk) {
- return bs->drv->bdrv_co_flush_to_disk(bs);
+ ret = bs->drv->bdrv_co_flush_to_disk(bs);
} else if (bs->drv->bdrv_aio_flush) {
BlockDriverAIOCB *acb;
CoroutineIOCompletion co = {
acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
if (acb == NULL) {
- return -EIO;
+ ret = -EIO;
} else {
qemu_coroutine_yield();
- return co.ret;
+ ret = co.ret;
}
} else {
/*
*
* Let's hope the user knows what he's doing.
*/
- return 0;
+ ret = 0;
+ }
+ if (ret < 0) {
+ return ret;
}
+
+ /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
+ * in the case of cache=unsafe, so there are no useless flushes.
+ */
+ return bdrv_co_flush(bs->file);
}
void bdrv_invalidate_cache(BlockDriverState *bs)
}
}
+void bdrv_clear_incoming_migration_all(void)
+{
+ BlockDriverState *bs;
+
+ QTAILQ_FOREACH(bs, &bdrv_states, list) {
+ bs->open_flags = bs->open_flags & ~(BDRV_O_INCOMING);
+ }
+}
+
int bdrv_flush(BlockDriverState *bs)
{
Coroutine *co;
/**
* If eject_flag is TRUE, eject the media. Otherwise, close the tray
*/
-void bdrv_eject(BlockDriverState *bs, int eject_flag)
+void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
BlockDriver *drv = bs->drv;
if (drv && drv->bdrv_eject) {
drv->bdrv_eject(bs, eject_flag);
}
+
+ if (bs->device_name[0] != '\0') {
+ bdrv_emit_qmp_eject_event(bs, eject_flag);
+ }
}
/**
return ret;
}
+
+void *block_job_create(const BlockJobType *job_type, BlockDriverState *bs,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ BlockJob *job;
+
+ if (bs->job || bdrv_in_use(bs)) {
+ return NULL;
+ }
+ bdrv_set_in_use(bs, 1);
+
+ job = g_malloc0(job_type->instance_size);
+ job->job_type = job_type;
+ job->bs = bs;
+ job->cb = cb;
+ job->opaque = opaque;
+ bs->job = job;
+ return job;
+}
+
+void block_job_complete(BlockJob *job, int ret)
+{
+ BlockDriverState *bs = job->bs;
+
+ assert(bs->job == job);
+ job->cb(job->opaque, ret);
+ bs->job = NULL;
+ g_free(job);
+ bdrv_set_in_use(bs, 0);
+}
+
+int block_job_set_speed(BlockJob *job, int64_t value)
+{
+ int rc;
+
+ if (!job->job_type->set_speed) {
+ return -ENOTSUP;
+ }
+ rc = job->job_type->set_speed(job, value);
+ if (rc == 0) {
+ job->speed = value;
+ }
+ return rc;
+}
+
+void block_job_cancel(BlockJob *job)
+{
+ job->cancelled = true;
+}
+
+bool block_job_is_cancelled(BlockJob *job)
+{
+ return job->cancelled;
+}
+
+void block_job_cancel_sync(BlockJob *job)
+{
+ BlockDriverState *bs = job->bs;
+
+ assert(bs->job == job);
+ block_job_cancel(job);
+ while (bs->job != NULL && bs->job->busy) {
+ qemu_aio_wait();
+ }
+}
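
/* Illustrative only: how a job implementation might build on the helpers
 * above.  Only the BlockJobType fields actually referenced by
 * block_job_create() and block_job_set_speed() are used; everything prefixed
 * with "example_" is a hypothetical name, and the coroutine plumbing is a
 * sketch, not part of this patch.
 */
typedef struct ExampleBlockJob {
    BlockJob common;            /* must be the first member */
    int64_t sector_num;         /* job-private progress state */
} ExampleBlockJob;

static int example_set_speed(BlockJob *job, int64_t value)
{
    return value < 0 ? -EINVAL : 0;
}

static const BlockJobType example_job_type = {
    .instance_size = sizeof(ExampleBlockJob),
    .set_speed     = example_set_speed,
};

static void coroutine_fn example_job_run(void *opaque)
{
    ExampleBlockJob *s = opaque;

    while (!block_job_is_cancelled(&s->common)) {
        /* ... copy or stream one chunk, updating s->sector_num ... */
        break;
    }
    block_job_complete(&s->common, 0);
}

static int example_job_start(BlockDriverState *bs,
                             BlockDriverCompletionFunc *cb, void *opaque)
{
    ExampleBlockJob *s = block_job_create(&example_job_type, bs, cb, opaque);

    if (!s) {
        return -EBUSY;          /* bs already has a job or is in use */
    }
    qemu_coroutine_enter(qemu_coroutine_create(example_job_run), s);
    return 0;
}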