void bdrv_close(BlockDriverState *bs)
{
+ BdrvAioNotifier *ban, *ban_next;
+
if (bs->job) {
block_job_cancel_sync(bs->job);
}
if (bs->io_limits_enabled) {
bdrv_io_limits_disable(bs);
}
+
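+    /* The notifiers are freed without being unlinked one by one; the whole
+     * list head is simply reinitialized below once they are all gone. */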
+ QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
+ g_free(ban);
+ }
+ QLIST_INIT(&bs->aio_notifiers);
}
void bdrv_close_all(void)
if (!drv)
return -ENOMEDIUM;
-
+
if (!bs->backing_hd) {
return -ENOTSUP;
}
bdrv_set_dirty(bs, sector_num, nb_sectors);
- if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
- bs->wr_highest_sector = sector_num + nb_sectors - 1;
- }
+ block_acct_highest_sector(&bs->stats, sector_num, nb_sectors);
+
if (bs->growable && ret >= 0) {
bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
}
}
}
+static void send_qmp_error_event(BlockDriverState *bs,
+                                 BlockErrorAction action,
+                                 bool is_read, int error)
+{
+    IoOperationType optype;
+
+    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
+    qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
+                                   bdrv_iostatus_is_enabled(bs),
+                                   error == ENOSPC, strerror(error),
+                                   &error_abort);
+}
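/* For illustration (the values are hypothetical, the field names come from
 * the BLOCK_IO_ERROR schema this call targets), a resulting QMP event could
 * look like:
 *
 *   { "event": "BLOCK_IO_ERROR",
 *     "data": { "device": "ide0-hd1", "operation": "write",
 *               "action": "stop", "nospace": true,
 *               "reason": "No space left on device" } }
 */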
+
/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 *
 * qemu_system_vmstop_request_prepare() ensures that the STOP event is
 * only emitted after the BLOCK_IO_ERROR event; if management restarts
 * the VM before the STOP event is issued, vm_start() also ensures that
 * the STOP/RESUME pair of events is emitted.
 */
qemu_system_vmstop_request_prepare();
- qapi_event_send_block_io_error(bdrv_get_device_name(bs),
- is_read ? IO_OPERATION_TYPE_READ :
- IO_OPERATION_TYPE_WRITE,
- action, &error_abort);
+ send_qmp_error_event(bs, action, is_read, error);
qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
} else {
- qapi_event_send_block_io_error(bdrv_get_device_name(bs),
- is_read ? IO_OPERATION_TYPE_READ :
- IO_OPERATION_TYPE_WRITE,
- action, &error_abort);
+ send_qmp_error_event(bs, action, is_read, error);
}
}
// Add the second request
qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
+ // Add tail of first request, if necessary
+ if (qiov->size < reqs[outidx].qiov->size) {
+ qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
+ reqs[outidx].qiov->size - qiov->size);
+ }
+
reqs[outidx].nb_sectors = qiov->size >> 9;
reqs[outidx].qiov = qiov;
}
}
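/* Worked example for the tail concatenation above (illustrative numbers):
 * if the first request covers sectors 0-7 and the overlapping second
 * request covers sectors 2-3, the merged qiov holds the head (sectors 0-1)
 * plus the second request (sectors 2-3), i.e. 4 sectors, which is shorter
 * than the original 8; sectors 4-7 therefore have to be re-added from the
 * first request's qiov.
 */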
-void
-bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie, int64_t bytes,
- enum BlockAcctType type)
-{
- assert(type < BDRV_MAX_IOTYPE);
-
- cookie->bytes = bytes;
- cookie->start_time_ns = get_clock();
- cookie->type = type;
-}
-
-void
-bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie)
-{
- assert(cookie->type < BDRV_MAX_IOTYPE);
-
- bs->nr_bytes[cookie->type] += cookie->bytes;
- bs->nr_ops[cookie->type]++;
- bs->total_time_ns[cookie->type] += get_clock() - cookie->start_time_ns;
-}
-
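/* A migration sketch, not part of the patch: the helpers removed above
 * presumably resurface as block_acct_start()/block_acct_done() in
 * block/accounting.c, keyed on a BlockAcctStats rather than the whole
 * BlockDriverState, so a former call such as
 *
 *     bdrv_acct_start(bs, &cookie, bytes, BDRV_ACCT_READ);
 *
 * would become
 *
 *     block_acct_start(bdrv_get_stats(bs), &cookie, bytes, BLOCK_ACCT_READ);
 */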
void bdrv_img_create(const char *filename, const char *fmt,
const char *base_filename, const char *base_fmt,
char *options, uint64_t img_size, int flags,
void bdrv_detach_aio_context(BlockDriverState *bs)
{
+ BdrvAioNotifier *baf;
+
if (!bs->drv) {
return;
}
+ QLIST_FOREACH(baf, &bs->aio_notifiers, list) {
+ baf->detach_aio_context(baf->opaque);
+ }
+
if (bs->io_limits_enabled) {
throttle_detach_aio_context(&bs->throttle_state);
}
void bdrv_attach_aio_context(BlockDriverState *bs,
AioContext *new_context)
{
+ BdrvAioNotifier *ban;
+
if (!bs->drv) {
return;
}
if (bs->io_limits_enabled) {
throttle_attach_aio_context(&bs->throttle_state, new_context);
}
+
+ QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
+ ban->attached_aio_context(new_context, ban->opaque);
+ }
}
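/* Note the symmetry above: detach notifiers run first, while the BDS is
 * still fully set up in its old AioContext, whereas attach notifiers run
 * last, once throttling (and the rest of the BDS state) already lives in
 * the new context.
 */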
void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
aio_context_release(new_context);
}
+void bdrv_add_aio_context_notifier(BlockDriverState *bs,
+ void (*attached_aio_context)(AioContext *new_context, void *opaque),
+ void (*detach_aio_context)(void *opaque), void *opaque)
+{
+ BdrvAioNotifier *ban = g_new(BdrvAioNotifier, 1);
+ *ban = (BdrvAioNotifier){
+ .attached_aio_context = attached_aio_context,
+ .detach_aio_context = detach_aio_context,
+ .opaque = opaque
+ };
+
+ QLIST_INSERT_HEAD(&bs->aio_notifiers, ban, list);
+}
+
+void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
+ void (*attached_aio_context)(AioContext *,
+ void *),
+ void (*detach_aio_context)(void *),
+ void *opaque)
+{
+ BdrvAioNotifier *ban, *ban_next;
+
+ QLIST_FOREACH_SAFE(ban, &bs->aio_notifiers, list, ban_next) {
+ if (ban->attached_aio_context == attached_aio_context &&
+ ban->detach_aio_context == detach_aio_context &&
+ ban->opaque == opaque)
+ {
+ QLIST_REMOVE(ban, list);
+ g_free(ban);
+
+ return;
+ }
+ }
+
+ abort();
+}
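/* A minimal usage sketch under assumptions: ExampleDevice and the example_*
 * functions are hypothetical, not part of this patch; aio_bh_new() and
 * qemu_bh_delete() are the stock QEMU bottom-half helpers. The device keeps
 * a bottom half in the BDS's current AioContext and relies on the notifier
 * pair to recreate it whenever the BDS migrates to another context. Removal
 * must pass the exact same callback/opaque triple that was registered,
 * otherwise the abort() above fires.
 */
typedef struct ExampleDevice {
    BlockDriverState *bs;
    QEMUBH *bh;
} ExampleDevice;

static void example_bh_cb(void *opaque)
{
    /* work that must run in the AioContext the BDS currently lives in */
}

static void example_attached_aio_context(AioContext *new_context, void *opaque)
{
    ExampleDevice *d = opaque;

    d->bh = aio_bh_new(new_context, example_bh_cb, d);
}

static void example_detach_aio_context(void *opaque)
{
    ExampleDevice *d = opaque;

    qemu_bh_delete(d->bh);
    d->bh = NULL;
}

static void example_realize(ExampleDevice *d, BlockDriverState *bs)
{
    d->bs = bs;
    bdrv_add_aio_context_notifier(bs, example_attached_aio_context,
                                  example_detach_aio_context, d);
}

static void example_unrealize(ExampleDevice *d)
{
    bdrv_remove_aio_context_notifier(d->bs, example_attached_aio_context,
                                     example_detach_aio_context, d);
}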
+
void bdrv_add_before_write_notifier(BlockDriverState *bs,
NotifierWithReturn *notifier)
{
QDECREF(json);
}
}
+
+/* The purpose of this accessor function is to allow the device models to
+ * access the BlockAcctStats structure embedded inside a BlockDriverState
+ * without being aware of the BlockDriverState structure layout.
+ * It will go away when the BlockAcctStats structure is moved inside the
+ * device models.
+ */
+BlockAcctStats *bdrv_get_stats(BlockDriverState *bs)
+{
+ return &bs->stats;
+}
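/* A usage sketch, assuming the block_acct_start()/block_acct_done() helpers
 * sketched earlier and the BLOCK_ACCT_WRITE accounting type; the function
 * name is hypothetical. A device model can account an I/O operation through
 * the accessor without knowing the BlockDriverState layout:
 */
static void example_account_write(BlockDriverState *bs, int64_t bytes)
{
    BlockAcctCookie cookie;

    block_acct_start(bdrv_get_stats(bs), &cookie, bytes, BLOCK_ACCT_WRITE);
    /* ... submit the write and wait for completion ... */
    block_acct_done(bdrv_get_stats(bs), &cookie);
}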