*
*/
-#include "qemu-timer.h"
+#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
-#include "qerror.h"
-#include "migration.h"
+#include "qapi/qmp/qerror.h"
+#include "migration/migration.h"
static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
}
}
-static AIOPool qed_aio_pool = {
+static const AIOCBInfo qed_aiocb_info = {
.aiocb_size = sizeof(QEDAIOCB),
.cancel = qed_aio_cancel,
};
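/* Note: AIOCBInfo is the const descriptor that replaces the old AIOPool. It
 * records the size of each QEDAIOCB and the cancel handler; qemu_aio_get()
 * (see bdrv_qed_aio_setup below) presumably consults it when allocating a
 * request. */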
le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}
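/* qed_write_header_sync() drops its static linkage, presumably so code
 * outside qed.c (e.g. the consistency-check path) can rewrite the header,
 * for instance to clear QED_F_NEED_CHECK after a successful repair. */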
-static int qed_write_header_sync(BDRVQEDState *s)
+int qed_write_header_sync(BDRVQEDState *s)
{
QEDHeader le;
int ret;
s->bs = bs;
}
-static int bdrv_qed_open(BlockDriverState *bs, int flags)
+static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags)
{
BDRVQEDState *s = bs->opaque;
QEDHeader le_header;
qed_header_le_to_cpu(&le_header, &s->header);
if (s->header.magic != QED_MAGIC) {
- return -EINVAL;
+ return -EMEDIUMTYPE;
}
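    /* -EMEDIUMTYPE instead of -EINVAL: a magic mismatch means the file is
     * not a QED image at all, which presumably lets callers tell "wrong
     * format" apart from a corrupted image during format probing. */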
if (s->header.features & ~QED_FEATURE_MASK) {
/* image uses unsupported feature bits */
}
/* If image was not closed cleanly, check consistency */
- if (s->header.features & QED_F_NEED_CHECK) {
+ if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
/* Read-only images cannot be fixed. There is no risk of corruption
* since write operations are not possible. Therefore, allow
* potentially inconsistent images to be opened read-only. This can
if (ret) {
goto out;
}
- if (!result.corruptions && !result.check_errors) {
- /* Ensure fixes reach storage before clearing check bit */
- bdrv_flush(s->bs);
-
- s->header.features &= ~QED_F_NEED_CHECK;
- qed_write_header_sync(s);
- }
}
}
return ret;
}
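/* With the BDRV_O_CHECK guard above, the automatic repair is skipped when
 * the image is opened by an explicit consistency check. The flush /
 * clear-QED_F_NEED_CHECK / qed_write_header_sync() sequence removed here is
 * presumably performed by the shared check code instead, so that both the
 * open-time repair and qemu-img check mark the image clean the same way. */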
+/* We have nothing to do for QED reopen, stubs just return
+ * success */
+static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
+ BlockReopenQueue *queue, Error **errp)
+{
+ return 0;
+}
+
static void bdrv_qed_close(BlockDriverState *bs)
{
BDRVQEDState *s = bs->opaque;
return ret;
}
- ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR | BDRV_O_CACHE_WB);
+ ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB);
if (ret < 0) {
return ret;
}
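    /* bdrv_file_open() now takes an options QDict as its third argument;
     * NULL here presumably means "no driver-specific options" for the newly
     * created image file. */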
BlockDriverCompletionFunc *cb,
void *opaque, int flags)
{
- QEDAIOCB *acb = qemu_aio_get(&qed_aio_pool, bs, cb, opaque);
+ QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);
trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
opaque, flags);
int nb_sectors)
{
BlockDriverAIOCB *blockacb;
+ BDRVQEDState *s = bs->opaque;
QEDWriteZeroesCB cb = { .done = false };
QEMUIOVector qiov;
struct iovec iov;
+ /* Refuse if there are untouched backing file sectors */
+ if (bs->backing_hd) {
+ if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
+ return -ENOTSUP;
+ }
+ if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
+ return -ENOTSUP;
+ }
+ }
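+    /* A zero write that does not start and end on cluster boundaries would
+     * have to preserve the untouched backing-file sectors in the partially
+     * covered clusters; returning -ENOTSUP presumably lets the generic block
+     * layer fall back to ordinary writes for such requests. */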
+
/* Zero writes start without an I/O buffer. If a buffer becomes necessary
* then it will be allocated during request processing.
*/
bdrv_qed_close(bs);
memset(s, 0, sizeof(BDRVQEDState));
- bdrv_qed_open(bs, bs->open_flags);
+ bdrv_qed_open(bs, NULL, bs->open_flags);
}
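/* This close/reset/reopen sequence (presumably the driver's cache
 * invalidation hook) re-reads the header and tables from the image via
 * bdrv_qed_open() with a NULL options dict, which is needed when the file
 * may have been modified externally, e.g. after incoming migration. */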
static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
.bdrv_rebind = bdrv_qed_rebind,
.bdrv_open = bdrv_qed_open,
.bdrv_close = bdrv_qed_close,
+ .bdrv_reopen_prepare = bdrv_qed_reopen_prepare,
.bdrv_create = bdrv_qed_create,
+ .bdrv_has_zero_init = bdrv_has_zero_init_1,
.bdrv_co_is_allocated = bdrv_qed_co_is_allocated,
.bdrv_make_empty = bdrv_qed_make_empty,
.bdrv_aio_readv = bdrv_qed_aio_readv,