#include "qapi/qmp/qerror.h"
#include "migration/migration.h"
-static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
-{
- QEDAIOCB *acb = (QEDAIOCB *)blockacb;
- AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
- bool finished = false;
-
- /* Wait for the request to finish */
- acb->finished = &finished;
- while (!finished) {
- aio_poll(aio_context, true);
- }
-}
-
static const AIOCBInfo qed_aiocb_info = {
.aiocb_size = sizeof(QEDAIOCB),
- .cancel = qed_aio_cancel,
};
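/* With .cancel gone, cancellation is assumed to be handled generically by
 * the block layer: bdrv_aio_cancel() polls the request's AioContext until
 * the completion callback has run, so the driver no longer needs the
 * per-request "finished" flag removed from qed_aio_complete_bh() below. */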
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
* This function only updates known header fields in-place and does not affect
* extra data after the QED header.
*/
-static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
+static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
void *opaque)
{
/* We must write full sectors for O_DIRECT but cannot necessarily generate
snprintf(buf, sizeof(buf), "%" PRIx64,
s->header.features & ~QED_FEATURE_MASK);
error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
- bs->device_name, "QED", buf);
+ bdrv_get_device_name(bs), "QED", buf);
return -ENOTSUP;
}
if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
return ret;
}
-static int bdrv_qed_refresh_limits(BlockDriverState *bs)
+static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVQEDState *s = bs->opaque;
bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;
-
- return 0;
}
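/* write_zeroes_alignment is measured in sectors; for example, the default
 * 64 KB QED cluster size yields 65536 >> BDRV_SECTOR_BITS = 128 sectors. */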
/* We have nothing to do for QED reopen, stubs just return
static int qed_create(const char *filename, uint32_t cluster_size,
uint64_t image_size, uint32_t table_size,
const char *backing_file, const char *backing_fmt,
- Error **errp)
+ QemuOpts *opts, Error **errp)
{
QEDHeader header = {
.magic = QED_MAGIC,
int ret = 0;
BlockDriverState *bs;
- ret = bdrv_create_file(filename, NULL, NULL, &local_err);
+ ret = bdrv_create_file(filename, opts, &local_err);
if (ret < 0) {
error_propagate(errp, local_err);
return ret;
char *backing_fmt = NULL;
int ret;
- image_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
+ image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
+ BDRV_SECTOR_SIZE);
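/* The ROUND_UP means e.g. a requested 1000-byte image becomes 1024 bytes,
 * keeping image_size a whole number of 512-byte sectors. */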
backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
cluster_size = qemu_opt_get_size_del(opts,
}
ret = qed_create(filename, cluster_size, image_size, table_size,
- backing_file, backing_fmt, errp);
+ backing_file, backing_fmt, opts, errp);
finish:
g_free(backing_file);
/**
* Read from the backing file or zero-fill if no backing file
*
- * @s: QED state
- * @pos: Byte position in device
- * @qiov: Destination I/O vector
- * @cb: Completion function
- * @opaque: User data for completion function
+ * @s: QED state
+ * @pos: Byte position in device
+ * @qiov: Destination I/O vector
+ * @backing_qiov: Possibly shortened copy of qiov, to be allocated here
+ * @cb: Completion function
+ * @opaque: User data for completion function
*
* This function reads qiov->size bytes starting at pos from the backing file.
* If there is no backing file then zeroes are read.
*/
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
QEMUIOVector *qiov,
- BlockDriverCompletionFunc *cb, void *opaque)
+ QEMUIOVector **backing_qiov,
+ BlockCompletionFunc *cb, void *opaque)
{
uint64_t backing_length = 0;
size_t size;
/* If the read straddles the end of the backing file, shorten it */
size = MIN((uint64_t)backing_length - pos, qiov->size);
+ assert(*backing_qiov == NULL);
+ *backing_qiov = g_new(QEMUIOVector, 1);
+ qemu_iovec_init(*backing_qiov, qiov->niov);
+ qemu_iovec_concat(*backing_qiov, qiov, 0, size);
+
BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
- qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
+ *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}
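/* A worked example (illustrative numbers, not from the patch): for a 64 KB
 * request where only 4 KB of backing file remain past pos, size is
 * MIN(4096, 65536) = 4096, and *backing_qiov is a freshly allocated
 * QEMUIOVector aliasing just the first 4096 bytes of qiov.  The backing
 * read therefore never runs past EOF, and the caller frees the shortened
 * copy once the read completes, without touching the original qiov. */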
typedef struct {
GenericCB gencb;
BDRVQEDState *s;
QEMUIOVector qiov;
+ QEMUIOVector *backing_qiov;
struct iovec iov;
uint64_t offset;
} CopyFromBackingFileCB;
CopyFromBackingFileCB *copy_cb = opaque;
BDRVQEDState *s = copy_cb->s;
+ if (copy_cb->backing_qiov) {
+ qemu_iovec_destroy(copy_cb->backing_qiov);
+ g_free(copy_cb->backing_qiov);
+ copy_cb->backing_qiov = NULL;
+ }
+
if (ret) {
qed_copy_from_backing_file_cb(copy_cb, ret);
return;
*/
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
uint64_t len, uint64_t offset,
- BlockDriverCompletionFunc *cb,
+ BlockCompletionFunc *cb,
void *opaque)
{
CopyFromBackingFileCB *copy_cb;
copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
copy_cb->s = s;
copy_cb->offset = offset;
+ copy_cb->backing_qiov = NULL;
copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
copy_cb->iov.iov_len = len;
qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);
- qed_read_backing_file(s, pos, &copy_cb->qiov,
+ qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
qed_copy_from_backing_file_write, copy_cb);
}
static void qed_aio_complete_bh(void *opaque)
{
QEDAIOCB *acb = opaque;
- BlockDriverCompletionFunc *cb = acb->common.cb;
+ BlockCompletionFunc *cb = acb->common.cb;
void *user_opaque = acb->common.opaque;
int ret = acb->bh_ret;
- bool *finished = acb->finished;
qemu_bh_delete(acb->bh);
- qemu_aio_release(acb);
+ qemu_aio_unref(acb);
/* Invoke callback */
cb(user_opaque, ret);
-
- /* Signal cancel completion */
- if (finished) {
- *finished = true;
- }
}
static void qed_aio_complete(QEDAIOCB *acb, int ret)
BDRVQEDState *s = acb_to_s(acb);
uint64_t offset = acb->cur_cluster +
qed_offset_into_cluster(s, acb->cur_pos);
- BlockDriverCompletionFunc *next_fn;
+ BlockCompletionFunc *next_fn;
trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
BDRVQEDState *s = acb_to_s(acb);
- BlockDriverCompletionFunc *cb;
+ BlockCompletionFunc *cb;
/* Cancel timer when the first allocating request comes in */
if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
struct iovec *iov = acb->qiov->iov;
if (!iov->iov_base) {
- iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
+ iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
+ if (iov->iov_base == NULL) {
+ qed_aio_complete(acb, -ENOMEM);
+ return;
+ }
memset(iov->iov_base, 0, iov->iov_len);
}
}
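/* qemu_try_blockalign() differs from qemu_blockalign() in that it returns
 * NULL when the allocation fails rather than aborting the process, which
 * is what lets the request above fail cleanly with -ENOMEM. */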
return;
} else if (ret != QED_CLUSTER_FOUND) {
qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
- qed_aio_next_io, acb);
+ &acb->backing_qiov, qed_aio_next_io, acb);
return;
}
trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);
+ if (acb->backing_qiov) {
+ qemu_iovec_destroy(acb->backing_qiov);
+ g_free(acb->backing_qiov);
+ acb->backing_qiov = NULL;
+ }
+
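/* The shortened backing qiov from the previous iteration must be freed
 * before the next I/O is issued: qed_read_backing_file() asserts that
 * *backing_qiov is NULL on entry. */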
/* Handle I/O error */
if (ret) {
qed_aio_complete(acb, ret);
io_fn, acb);
}
-static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
- int64_t sector_num,
- QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb,
- void *opaque, int flags)
+static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
+ int64_t sector_num,
+ QEMUIOVector *qiov, int nb_sectors,
+ BlockCompletionFunc *cb,
+ void *opaque, int flags)
{
QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);
opaque, flags);
acb->flags = flags;
- acb->finished = NULL;
acb->qiov = qiov;
acb->qiov_offset = 0;
acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
+ acb->backing_qiov = NULL;
acb->request.l2_table = NULL;
qemu_iovec_init(&acb->cur_qiov, qiov->niov);
return &acb->common;
}
-static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
- int64_t sector_num,
- QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb,
- void *opaque)
+static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
+ int64_t sector_num,
+ QEMUIOVector *qiov, int nb_sectors,
+ BlockCompletionFunc *cb,
+ void *opaque)
{
return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}
-static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
- int64_t sector_num,
- QEMUIOVector *qiov, int nb_sectors,
- BlockDriverCompletionFunc *cb,
- void *opaque)
+static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
+ int64_t sector_num,
+ QEMUIOVector *qiov, int nb_sectors,
+ BlockCompletionFunc *cb,
+ void *opaque)
{
return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
opaque, QED_AIOCB_WRITE);
int nb_sectors,
BdrvRequestFlags flags)
{
- BlockDriverAIOCB *blockacb;
+ BlockAIOCB *blockacb;
BDRVQEDState *s = bs->opaque;
QEDWriteZeroesCB cb = { .done = false };
QEMUIOVector qiov;
.format_name = "qed",
.instance_size = sizeof(BDRVQEDState),
.create_opts = &qed_create_opts,
+ .supports_backing = true,
.bdrv_probe = bdrv_qed_probe,
.bdrv_rebind = bdrv_qed_rebind,
.bdrv_open = bdrv_qed_open,
.bdrv_close = bdrv_qed_close,
.bdrv_reopen_prepare = bdrv_qed_reopen_prepare,
- .bdrv_create2 = bdrv_qed_create,
+ .bdrv_create = bdrv_qed_create,
.bdrv_has_zero_init = bdrv_has_zero_init_1,
.bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
.bdrv_aio_readv = bdrv_qed_aio_readv,
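/* A minimal sketch of exercising the unified create path (hypothetical
 * invocation, not part of this patch):
 *
 *   qemu-img create -f qed -o cluster_size=65536 \
 *       -o backing_file=base.qed overlay.qed 10G
 *
 * The QemuOpts parsed from -o now flow through bdrv_qed_create() into
 * qed_create() and on to bdrv_create_file(), so protocol-level drivers
 * can also see the creation options. */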