return NULL;
}
-/* New and old BlockDriverState structs for group snapshots */
+/* New and old BlockDriverState structs for atomic group operations */
typedef struct BlkTransactionState BlkTransactionState;
typedef struct InternalSnapshotState {
BlkTransactionState common;
BlockDriverState *bs;
+ AioContext *aio_context;
QEMUSnapshotInfo sn;
} InternalSnapshotState;
return;
}
+ /* AioContext is released in .clean() */
+ state->aio_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(state->aio_context);
+
if (!bdrv_is_inserted(bs)) {
error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
return;
}
+ if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
+ return;
+ }
+
if (bdrv_is_read_only(bs)) {
error_set(errp, QERR_DEVICE_IS_READ_ONLY, device);
return;
}
}
+static void internal_snapshot_clean(BlkTransactionState *common)
+{
+ InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState,
+ common, common);
+
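+ /* aio_context stays NULL if prepare() failed before acquiring it */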
+ if (state->aio_context) {
+ aio_context_release(state->aio_context);
+ }
+}
+
/* external snapshot private data */
typedef struct ExternalSnapshotState {
BlkTransactionState common;
BlockDriverState *old_bs;
BlockDriverState *new_bs;
+ AioContext *aio_context;
} ExternalSnapshotState;
static void external_snapshot_prepare(BlkTransactionState *common,
return;
}
+ /* Acquire AioContext now so any threads operating on old_bs stop */
+ state->aio_context = bdrv_get_aio_context(state->old_bs);
+ aio_context_acquire(state->aio_context);
+
if (!bdrv_is_inserted(state->old_bs)) {
error_set(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
return;
ExternalSnapshotState *state =
DO_UPCAST(ExternalSnapshotState, common, common);
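+ /* Move new_bs into old_bs' acquired AioContext before it replaces old_bs */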
+ bdrv_set_aio_context(state->new_bs, state->aio_context);
+
/* This removes our old bs and adds the new bs */
bdrv_append(state->new_bs, state->old_bs);
/* We don't need (or want) to use the transactional
* bdrv_reopen_multiple() across all the entries at once, because we
* don't want to abort all of them if one of them fails the reopen */
bdrv_reopen(state->new_bs, state->new_bs->open_flags & ~BDRV_O_RDWR,
NULL);
+
+ aio_context_release(state->aio_context);
}
static void external_snapshot_abort(BlkTransactionState *common)
if (state->new_bs) {
bdrv_unref(state->new_bs);
}
+ if (state->aio_context) {
+ aio_context_release(state->aio_context);
+ }
}
typedef struct DriveBackupState {
BlkTransactionState common;
BlockDriverState *bs;
+ AioContext *aio_context;
BlockJob *job;
} DriveBackupState;
static void drive_backup_prepare(BlkTransactionState *common, Error **errp)
{
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
+ BlockDriverState *bs;
DriveBackup *backup;
Error *local_err = NULL;
assert(common->action->kind == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
backup = common->action->drive_backup;
+ bs = bdrv_find(backup->device);
+ if (!bs) {
+ error_set(errp, QERR_DEVICE_NOT_FOUND, backup->device);
+ return;
+ }
+
+ /* AioContext is released in .clean() */
+ state->aio_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(state->aio_context);
+
qmp_drive_backup(backup->device, backup->target,
backup->has_format, backup->format,
backup->sync,
&local_err);
if (local_err) {
error_propagate(errp, local_err);
- state->bs = NULL;
- state->job = NULL;
return;
}
- state->bs = bdrv_find(backup->device);
+ state->bs = bs;
state->job = state->bs->job;
}
}
}
+static void drive_backup_clean(BlkTransactionState *common)
+{
+ DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
+
+ if (state->aio_context) {
+ aio_context_release(state->aio_context);
+ }
+}
+
static void abort_prepare(BlkTransactionState *common, Error **errp)
{
error_setg(errp, "Transaction aborted using Abort action");
.instance_size = sizeof(DriveBackupState),
.prepare = drive_backup_prepare,
.abort = drive_backup_abort,
+ .clean = drive_backup_clean,
},
[TRANSACTION_ACTION_KIND_ABORT] = {
.instance_size = sizeof(BlkTransactionState),
.instance_size = sizeof(InternalSnapshotState),
.prepare = internal_snapshot_prepare,
.abort = internal_snapshot_abort,
+ .clean = internal_snapshot_clean,
},
};
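
The .clean() callbacks registered above run after every action in the group has either committed or aborted, which is what allows .prepare() to acquire a device's AioContext and keep it held for the whole transaction. A minimal sketch of that ordering, reusing the BlkTransactionState and BdrvActionOps definitions from this file (hypothetical helper, simplified from the real loop in qmp_transaction() below):

/* Sketch: every created action state is committed or aborted, and is then
 * cleaned regardless of the outcome.  "states" holds the per-action state
 * objects that were created; prepare_failed says whether any .prepare()
 * reported an error. */
static void transaction_order_sketch(BlkTransactionState *states[], int n,
                                     bool prepare_failed)
{
    int i;

    for (i = 0; i < n; i++) {
        const BdrvActionOps *ops = states[i]->ops;

        if (prepare_failed) {
            if (ops->abort) {
                ops->abort(states[i]);    /* roll back, AioContext still held */
            }
        } else if (ops->commit) {
            ops->commit(states[i]);       /* complete, AioContext still held */
        }
    }

    for (i = 0; i < n; i++) {
        if (states[i]->ops->clean) {
            states[i]->ops->clean(states[i]);   /* AioContext released here */
        }
    }
}

The external snapshot action instead releases its AioContext in its commit and abort handlers, since it does not register a .clean() callback in this patch.
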
/*
- * 'Atomic' group snapshots. The snapshots are taken as a set, and if any fail
- * then we do not pivot any of the devices in the group, and abandon the
- * snapshots
+ * 'Atomic' group operations. The operations are performed as a set, and if
+ * any fail then we roll back all operations in the group.
*/
void qmp_transaction(TransactionActionList *dev_list, Error **errp)
{
QSIMPLEQ_HEAD(snap_bdrv_states, BlkTransactionState) snap_bdrv_states;
QSIMPLEQ_INIT(&snap_bdrv_states);
- /* drain all i/o before any snapshots */
+ /* drain all i/o before any operations */
bdrv_drain_all();
- /* We don't do anything in this loop that commits us to the snapshot */
+ /* We don't do anything in this loop that commits us to the operations */
while (NULL != dev_entry) {
TransactionAction *dev_info = NULL;
const BdrvActionOps *ops;
goto exit;
delete_and_fail:
- /*
- * failure, and it is all-or-none; abandon each new bs, and keep using
- * the original bs for all images
- */
+ /* failure, and it is all-or-none; roll back all operations */
QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) {
if (state->ops->abort) {
state->ops->abort(state);