#include "trace.h"
#include "qemu/option_int.h"
#include "qemu/cutils.h"
+#include "qemu/bswap.h"
/*
Differences with QCOW:
printf("attempting to read extended header in offset %lu\n", offset);
#endif
- ret = bdrv_pread(bs->file->bs, offset, &ext, sizeof(ext));
+ ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext));
if (ret < 0) {
error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
"pread fail from offset %" PRIu64, offset);
sizeof(bs->backing_format));
return 2;
}
- ret = bdrv_pread(bs->file->bs, offset, bs->backing_format, ext.len);
+ ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
"Could not read format name");
case QCOW2_EXT_MAGIC_FEATURE_TABLE:
if (p_feature_table != NULL) {
void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
- ret = bdrv_pread(bs->file->bs, offset , feature_table, ext.len);
+ ret = bdrv_pread(bs->file, offset, feature_table, ext.len);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
"Could not read table");
uext->len = ext.len;
QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);
- ret = bdrv_pread(bs->file->bs, offset , uext->data, uext->len);
+ ret = bdrv_pread(bs->file, offset, uext->data, uext->len);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR: unknown extension: "
"Could not read data");
}
val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
- ret = bdrv_pwrite(bs->file->bs, offsetof(QCowHeader, incompatible_features),
+ ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
&val, sizeof(val));
if (ret < 0) {
return ret;
uint64_t ext_end;
uint64_t l1_vm_state_index;
- ret = bdrv_pread(bs->file->bs, 0, &header, sizeof(header));
+ ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read qcow2 header");
goto fail;
if (header.header_length > sizeof(header)) {
s->unknown_header_fields_size = header.header_length - sizeof(header);
s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
- ret = bdrv_pread(bs->file->bs, sizeof(header), s->unknown_header_fields,
+ ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
s->unknown_header_fields_size);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
if (s->crypt_method_header) {
if (bdrv_uses_whitelist() &&
s->crypt_method_header == QCOW_CRYPT_AES) {
- error_report("qcow2 built-in AES encryption is deprecated");
- error_printf("Support for it will be removed in a future release.\n"
- "You can use 'qemu-img convert' to switch to an\n"
- "unencrypted qcow2 image, or a LUKS raw image.\n");
+ error_setg(errp,
+ "Use of AES-CBC encrypted qcow2 images is no longer "
+ "supported in system emulators");
+ error_append_hint(errp,
+ "You can use 'qemu-img convert' to convert your "
+ "image to an alternative supported format, such "
+ "as unencrypted qcow2, or raw with the LUKS "
+ "format instead.\n");
+ ret = -ENOSYS;
+ goto fail;
}
- bs->encrypted = 1;
+ bs->encrypted = true;
}
s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
ret = -ENOMEM;
goto fail;
}
- ret = bdrv_pread(bs->file->bs, s->l1_table_offset, s->l1_table,
+ ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read L1 table");
ret = -EINVAL;
goto fail;
}
- ret = bdrv_pread(bs->file->bs, header.backing_file_offset,
+ ret = bdrv_pread(bs->file, header.backing_file_offset,
bs->backing_file, len);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read backing file name");
{
BDRVQcow2State *s = bs->opaque;
- bs->bl.write_zeroes_alignment = s->cluster_sectors;
+ if (bs->encrypted) {
+ /* Encryption works at sector granularity */
+ bs->bl.request_alignment = BDRV_SECTOR_SIZE;
+ }
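+ /* Zero writes are handled per cluster, so advertise that alignment */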
+ bs->bl.pwrite_zeroes_alignment = s->cluster_size;
}
static int qcow2_set_key(BlockDriverState *bs, const char *key)
BDRVQcow2State *s = bs->opaque;
uint64_t cluster_offset;
int index_in_cluster, ret;
+ unsigned int bytes;
int64_t status = 0;
- *pnum = nb_sectors;
+ bytes = MIN(INT_MAX, nb_sectors * BDRV_SECTOR_SIZE);
qemu_co_mutex_lock(&s->lock);
- ret = qcow2_get_cluster_offset(bs, sector_num << 9, pnum, &cluster_offset);
+ ret = qcow2_get_cluster_offset(bs, sector_num << 9, &bytes,
+ &cluster_offset);
qemu_co_mutex_unlock(&s->lock);
if (ret < 0) {
return ret;
}
+ *pnum = bytes >> BDRV_SECTOR_BITS;
+
if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED &&
!s->cipher) {
index_in_cluster = sector_num & (s->cluster_sectors - 1);
/* handle reading after the end of the backing file */
int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
- int64_t sector_num, int nb_sectors)
+ int64_t offset, int bytes)
{
+ uint64_t bs_size = bs->total_sectors * BDRV_SECTOR_SIZE;
int n1;
- if ((sector_num + nb_sectors) <= bs->total_sectors)
- return nb_sectors;
- if (sector_num >= bs->total_sectors)
+
+ if ((offset + bytes) <= bs_size) {
+ return bytes;
+ }
+
+ if (offset >= bs_size) {
n1 = 0;
- else
- n1 = bs->total_sectors - sector_num;
+ } else {
+ n1 = bs_size - offset;
+ }
- qemu_iovec_memset(qiov, 512 * n1, 0, 512 * (nb_sectors - n1));
+ qemu_iovec_memset(qiov, n1, 0, bytes - n1);
return n1;
}
-static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
- int remaining_sectors, QEMUIOVector *qiov)
+static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
+ uint64_t bytes, QEMUIOVector *qiov,
+ int flags)
{
BDRVQcow2State *s = bs->opaque;
- int index_in_cluster, n1;
+ int offset_in_cluster, n1;
int ret;
- int cur_nr_sectors; /* number of sectors in current iteration */
+ unsigned int cur_bytes; /* number of bytes in current iteration */
uint64_t cluster_offset = 0;
uint64_t bytes_done = 0;
QEMUIOVector hd_qiov;
qemu_co_mutex_lock(&s->lock);
- while (remaining_sectors != 0) {
+ while (bytes != 0) {
/* prepare next request */
- cur_nr_sectors = remaining_sectors;
+ cur_bytes = MIN(bytes, INT_MAX);
if (s->cipher) {
- cur_nr_sectors = MIN(cur_nr_sectors,
- QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
+ cur_bytes = MIN(cur_bytes,
+ QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
}
- ret = qcow2_get_cluster_offset(bs, sector_num << 9,
- &cur_nr_sectors, &cluster_offset);
+ ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset);
if (ret < 0) {
goto fail;
}
- index_in_cluster = sector_num & (s->cluster_sectors - 1);
+ offset_in_cluster = offset_into_cluster(s, offset);
qemu_iovec_reset(&hd_qiov);
- qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
- cur_nr_sectors * 512);
+ qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);
switch (ret) {
case QCOW2_CLUSTER_UNALLOCATED:
if (bs->backing) {
/* read from the base image */
n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov,
- sector_num, cur_nr_sectors);
+ offset, cur_bytes);
if (n1 > 0) {
QEMUIOVector local_qiov;
qemu_iovec_init(&local_qiov, hd_qiov.niov);
- qemu_iovec_concat(&local_qiov, &hd_qiov, 0,
- n1 * BDRV_SECTOR_SIZE);
+ qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1);
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
qemu_co_mutex_unlock(&s->lock);
- ret = bdrv_co_readv(bs->backing->bs, sector_num,
- n1, &local_qiov);
+ ret = bdrv_co_preadv(bs->backing, offset, n1,
+ &local_qiov, 0);
qemu_co_mutex_lock(&s->lock);
qemu_iovec_destroy(&local_qiov);
}
} else {
/* Note: in this case, no need to wait */
- qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors);
+ qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes);
}
break;
case QCOW2_CLUSTER_ZERO:
- qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors);
+ qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes);
break;
case QCOW2_CLUSTER_COMPRESSED:
}
qemu_iovec_from_buf(&hd_qiov, 0,
- s->cluster_cache + index_in_cluster * 512,
- 512 * cur_nr_sectors);
+ s->cluster_cache + offset_in_cluster,
+ cur_bytes);
break;
case QCOW2_CLUSTER_NORMAL:
}
}
- assert(cur_nr_sectors <=
- QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
+ assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
qemu_iovec_reset(&hd_qiov);
- qemu_iovec_add(&hd_qiov, cluster_data,
- 512 * cur_nr_sectors);
+ qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes);
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
qemu_co_mutex_unlock(&s->lock);
- ret = bdrv_co_readv(bs->file->bs,
- (cluster_offset >> 9) + index_in_cluster,
- cur_nr_sectors, &hd_qiov);
+ ret = bdrv_co_preadv(bs->file,
+ cluster_offset + offset_in_cluster,
+ cur_bytes, &hd_qiov, 0);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
goto fail;
}
if (bs->encrypted) {
assert(s->cipher);
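+ /* Encrypted images advertise a 512-byte request alignment (see
+ * qcow2_refresh_limits()), so requests here are sector-aligned */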
+ assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
+ assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
Error *err = NULL;
- if (qcow2_encrypt_sectors(s, sector_num, cluster_data,
- cluster_data, cur_nr_sectors, false,
- &err) < 0) {
+ if (qcow2_encrypt_sectors(s, offset >> BDRV_SECTOR_BITS,
+ cluster_data, cluster_data,
+ cur_bytes >> BDRV_SECTOR_BITS,
+ false, &err) < 0) {
error_free(err);
ret = -EIO;
goto fail;
}
- qemu_iovec_from_buf(qiov, bytes_done,
- cluster_data, 512 * cur_nr_sectors);
+ qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes);
}
break;
goto fail;
}
- remaining_sectors -= cur_nr_sectors;
- sector_num += cur_nr_sectors;
- bytes_done += cur_nr_sectors * 512;
+ bytes -= cur_bytes;
+ offset += cur_bytes;
+ bytes_done += cur_bytes;
}
ret = 0;
return ret;
}
-static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
- int64_t sector_num,
- int remaining_sectors,
- QEMUIOVector *qiov)
+static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
+ uint64_t bytes, QEMUIOVector *qiov,
+ int flags)
{
BDRVQcow2State *s = bs->opaque;
- int index_in_cluster;
+ int offset_in_cluster;
int ret;
- int cur_nr_sectors; /* number of sectors in current iteration */
+ unsigned int cur_bytes; /* number of bytes in current iteration */
uint64_t cluster_offset;
QEMUIOVector hd_qiov;
uint64_t bytes_done = 0;
uint8_t *cluster_data = NULL;
QCowL2Meta *l2meta = NULL;
- trace_qcow2_writev_start_req(qemu_coroutine_self(), sector_num,
- remaining_sectors);
+ trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes);
qemu_iovec_init(&hd_qiov, qiov->niov);
qemu_co_mutex_lock(&s->lock);
- while (remaining_sectors != 0) {
+ while (bytes != 0) {
l2meta = NULL;
trace_qcow2_writev_start_part(qemu_coroutine_self());
- index_in_cluster = sector_num & (s->cluster_sectors - 1);
- cur_nr_sectors = remaining_sectors;
- if (bs->encrypted &&
- cur_nr_sectors >
- QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors - index_in_cluster) {
- cur_nr_sectors =
- QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors - index_in_cluster;
+ offset_in_cluster = offset_into_cluster(s, offset);
+ cur_bytes = MIN(bytes, INT_MAX);
+ if (bs->encrypted) {
+ cur_bytes = MIN(cur_bytes,
+ QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
+ - offset_in_cluster);
}
- ret = qcow2_alloc_cluster_offset(bs, sector_num << 9,
- &cur_nr_sectors, &cluster_offset, &l2meta);
+ ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
+ &cluster_offset, &l2meta);
if (ret < 0) {
goto fail;
}
assert((cluster_offset & 511) == 0);
qemu_iovec_reset(&hd_qiov);
- qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
- cur_nr_sectors * 512);
+ qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);
if (bs->encrypted) {
Error *err = NULL;
QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size);
- if (qcow2_encrypt_sectors(s, sector_num, cluster_data,
- cluster_data, cur_nr_sectors,
+ if (qcow2_encrypt_sectors(s, offset >> BDRV_SECTOR_BITS,
+ cluster_data, cluster_data,
+ cur_bytes >> BDRV_SECTOR_BITS,
true, &err) < 0) {
error_free(err);
ret = -EIO;
}
qemu_iovec_reset(&hd_qiov);
- qemu_iovec_add(&hd_qiov, cluster_data,
- cur_nr_sectors * 512);
+ qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes);
}
ret = qcow2_pre_write_overlap_check(bs, 0,
- cluster_offset + index_in_cluster * BDRV_SECTOR_SIZE,
- cur_nr_sectors * BDRV_SECTOR_SIZE);
+ cluster_offset + offset_in_cluster, cur_bytes);
if (ret < 0) {
goto fail;
}
qemu_co_mutex_unlock(&s->lock);
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
trace_qcow2_writev_data(qemu_coroutine_self(),
- (cluster_offset >> 9) + index_in_cluster);
- ret = bdrv_co_writev(bs->file->bs,
- (cluster_offset >> 9) + index_in_cluster,
- cur_nr_sectors, &hd_qiov);
+ cluster_offset + offset_in_cluster);
+ ret = bdrv_co_pwritev(bs->file,
+ cluster_offset + offset_in_cluster,
+ cur_bytes, &hd_qiov, 0);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
goto fail;
l2meta = next;
}
- remaining_sectors -= cur_nr_sectors;
- sector_num += cur_nr_sectors;
- bytes_done += cur_nr_sectors * 512;
- trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_nr_sectors);
+ bytes -= cur_bytes;
+ offset += cur_bytes;
+ bytes_done += cur_bytes;
+ trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes);
}
ret = 0;
qcow2_close(bs);
- bdrv_invalidate_cache(bs->file->bs, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- bs->drv = NULL;
- return;
- }
-
memset(s, 0, sizeof(BDRVQcow2State));
options = qdict_clone_shallow(bs->options);
}
/* Write the new header */
- ret = bdrv_pwrite(bs->file->bs, 0, header, s->cluster_size);
+ ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
if (ret < 0) {
goto fail;
}
{
BDRVQcow2State *s = bs->opaque;
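+ /* Reject backing file names longer than the 1023 bytes that qcow2
+ * accepts when opening an image */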
+ if (backing_file && strlen(backing_file) > 1023) {
+ return -EINVAL;
+ }
+
pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
static int preallocate(BlockDriverState *bs)
{
- uint64_t nb_sectors;
+ uint64_t bytes;
uint64_t offset;
uint64_t host_offset = 0;
- int num;
+ unsigned int cur_bytes;
int ret;
QCowL2Meta *meta;
- nb_sectors = bdrv_nb_sectors(bs);
+ bytes = bdrv_getlength(bs);
offset = 0;
- while (nb_sectors) {
- num = MIN(nb_sectors, INT_MAX >> BDRV_SECTOR_BITS);
- ret = qcow2_alloc_cluster_offset(bs, offset, &num,
+ while (bytes) {
+ cur_bytes = MIN(bytes, INT_MAX);
+ ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
&host_offset, &meta);
if (ret < 0) {
return ret;
/* TODO Preallocate data if requested */
- nb_sectors -= num;
- offset += num << BDRV_SECTOR_BITS;
+ bytes -= cur_bytes;
+ offset += cur_bytes;
}
/*
* EOF). Extend the image to the last allocated sector.
*/
if (host_offset != 0) {
- uint8_t buf[BDRV_SECTOR_SIZE];
- memset(buf, 0, BDRV_SECTOR_SIZE);
- ret = bdrv_write(bs->file->bs,
- (host_offset >> BDRV_SECTOR_BITS) + num - 1,
- buf, 1);
+ uint8_t data = 0;
+ ret = bdrv_pwrite(bs->file, (host_offset + cur_bytes) - 1,
+ &data, 1);
if (ret < 0) {
return ret;
}
}
blk = blk_new_open(filename, NULL, NULL,
- BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_PROTOCOL,
- &local_err);
+ BDRV_O_RDWR | BDRV_O_PROTOCOL, &local_err);
if (blk == NULL) {
error_propagate(errp, local_err);
return -EIO;
cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
}
- ret = blk_pwrite(blk, 0, header, cluster_size);
+ ret = blk_pwrite(blk, 0, header, cluster_size, 0);
g_free(header);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not write qcow2 header");
/* Write a refcount table with one refcount block */
refcount_table = g_malloc0(2 * cluster_size);
refcount_table[0] = cpu_to_be64(2 * cluster_size);
- ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size);
+ ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0);
g_free(refcount_table);
if (ret < 0) {
options = qdict_new();
qdict_put(options, "driver", qstring_from_str("qcow2"));
blk = blk_new_open(filename, NULL, options,
- BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH,
- &local_err);
+ BDRV_O_RDWR | BDRV_O_NO_FLUSH, &local_err);
if (blk == NULL) {
error_propagate(errp, local_err);
ret = -EIO;
options = qdict_new();
qdict_put(options, "driver", qstring_from_str("qcow2"));
blk = blk_new_open(filename, NULL, options,
- BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_BACKING,
- &local_err);
+ BDRV_O_RDWR | BDRV_O_NO_BACKING, &local_err);
if (blk == NULL) {
error_propagate(errp, local_err);
ret = -EIO;
ret = qcow2_create2(filename, size, backing_file, backing_fmt, flags,
cluster_size, prealloc, opts, version, refcount_order,
&local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- }
+ error_propagate(errp, local_err);
finish:
g_free(backing_file);
return ret;
}
-static coroutine_fn int qcow2_co_write_zeroes(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
+
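+/* Return true if the 'count' sectors starting at sector 'start' are known
+ * to read back as zeroes (an empty range trivially does) */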
+static bool is_zero_sectors(BlockDriverState *bs, int64_t start,
+ uint32_t count)
+{
+ int nr;
+ BlockDriverState *file;
+ int64_t res;
+
+ if (!count) {
+ return true;
+ }
+ res = bdrv_get_block_status_above(bs, NULL, start, count,
+ &nr, &file);
+ return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == count;
+}
+
+static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
+ int64_t offset, int count, BdrvRequestFlags flags)
{
int ret;
BDRVQcow2State *s = bs->opaque;
- /* Emulate misaligned zero writes */
- if (sector_num % s->cluster_sectors || nb_sectors % s->cluster_sectors) {
- return -ENOTSUP;
+ uint32_t head = offset % s->cluster_size;
+ uint32_t tail = (offset + count) % s->cluster_size;
+
+ trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, count);
+
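+ /* A request that is not cluster-aligned is only turned into a
+ * full-cluster zero write if the rest of the cluster already reads as
+ * zeroes and the cluster is still unallocated or a zero cluster;
+ * otherwise return -ENOTSUP and let the caller fall back to writes */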
+ if (head || tail) {
+ int64_t cl_start = (offset - head) >> BDRV_SECTOR_BITS;
+ uint64_t off;
+ unsigned int nr;
+
+ assert(head + count <= s->cluster_size);
+
+ /* check whether remainder of cluster already reads as zero */
+ if (!(is_zero_sectors(bs, cl_start,
+ DIV_ROUND_UP(head, BDRV_SECTOR_SIZE)) &&
+ is_zero_sectors(bs, (offset + count) >> BDRV_SECTOR_BITS,
+ DIV_ROUND_UP(-tail & (s->cluster_size - 1),
+ BDRV_SECTOR_SIZE)))) {
+ return -ENOTSUP;
+ }
+
+ qemu_co_mutex_lock(&s->lock);
+ /* A new write may have occurred since the check above */
+ offset = cl_start << BDRV_SECTOR_BITS;
+ count = s->cluster_size;
+ nr = s->cluster_size;
+ ret = qcow2_get_cluster_offset(bs, offset, &nr, &off);
+ if (ret != QCOW2_CLUSTER_UNALLOCATED && ret != QCOW2_CLUSTER_ZERO) {
+ qemu_co_mutex_unlock(&s->lock);
+ return -ENOTSUP;
+ }
+ } else {
+ qemu_co_mutex_lock(&s->lock);
}
+ trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, count);
+
/* Whatever is left can use real zero clusters */
- qemu_co_mutex_lock(&s->lock);
- ret = qcow2_zero_clusters(bs, sector_num << BDRV_SECTOR_BITS,
- nb_sectors);
+ ret = qcow2_zero_clusters(bs, offset, count >> BDRV_SECTOR_BITS);
qemu_co_mutex_unlock(&s->lock);
return ret;
/* write updated header.size */
offset = cpu_to_be64(offset);
- ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader, size),
+ ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
&offset, sizeof(uint64_t));
if (ret < 0) {
return ret;
return 0;
}
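+/* Arguments handed from qcow2_write() to its coroutine entry point */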
+typedef struct Qcow2WriteCo {
+ BlockDriverState *bs;
+ int64_t sector_num;
+ const uint8_t *buf;
+ int nb_sectors;
+ int ret;
+} Qcow2WriteCo;
+
+static void qcow2_write_co_entry(void *opaque)
+{
+ Qcow2WriteCo *co = opaque;
+ QEMUIOVector qiov;
+ uint64_t offset = co->sector_num * BDRV_SECTOR_SIZE;
+ uint64_t bytes = co->nb_sectors * BDRV_SECTOR_SIZE;
+
+ struct iovec iov = (struct iovec) {
+ .iov_base = (uint8_t*) co->buf,
+ .iov_len = bytes,
+ };
+ qemu_iovec_init_external(&qiov, &iov, 1);
+
+ co->ret = qcow2_co_pwritev(co->bs, offset, bytes, &qiov, 0);
+}
+
+/* Wrapper for non-coroutine contexts */
+static int qcow2_write(BlockDriverState *bs, int64_t sector_num,
+ const uint8_t *buf, int nb_sectors)
+{
+ Coroutine *co;
+ AioContext *aio_context = bdrv_get_aio_context(bs);
+ Qcow2WriteCo data = {
+ .bs = bs,
+ .sector_num = sector_num,
+ .buf = buf,
+ .nb_sectors = nb_sectors,
+ .ret = -EINPROGRESS,
+ };
+ co = qemu_coroutine_create(qcow2_write_co_entry);
+ qemu_coroutine_enter(co, &data);
+ while (data.ret == -EINPROGRESS) {
+ aio_poll(aio_context, true);
+ }
+ return data.ret;
+}
+
/* XXX: put compressed sectors first, then all the cluster aligned
tables to avoid losing bytes in alignment */
static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num,
if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
/* could not compress: write normal cluster */
- ret = bdrv_write(bs, sector_num, buf, s->cluster_sectors);
+ ret = qcow2_write(bs, sector_num, buf, s->cluster_sectors);
if (ret < 0) {
goto fail;
}
}
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
- ret = bdrv_pwrite(bs->file->bs, cluster_offset, out_buf, out_len);
+ ret = bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len);
if (ret < 0) {
goto fail;
}
/* After this call, neither the in-memory nor the on-disk refcount
* information accurately describe the actual references */
- ret = bdrv_write_zeroes(bs->file->bs, s->l1_table_offset / BDRV_SECTOR_SIZE,
- l1_clusters * s->cluster_sectors, 0);
+ ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset,
+ l1_clusters * s->cluster_size, 0);
if (ret < 0) {
goto fail_broken_refcounts;
}
* overwrite parts of the existing refcount and L1 table, which is not
* an issue because the dirty flag is set, complete data loss is in fact
* desired and partial data loss is consequently fine as well */
- ret = bdrv_write_zeroes(bs->file->bs, s->cluster_size / BDRV_SECTOR_SIZE,
- (2 + l1_clusters) * s->cluster_size /
- BDRV_SECTOR_SIZE, 0);
+ ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size,
+ (2 + l1_clusters) * s->cluster_size, 0);
/* This call (even if it failed overall) may have overwritten on-disk
* refcount structures; in that case, the in-memory refcount information
* will probably differ from the on-disk information which makes the BDS
/* "Create" an empty reftable (one cluster) directly after the image
* header and an empty L1 table three clusters after the image header;
* the cluster between those two will be used as the first refblock */
- cpu_to_be64w(&l1_ofs_rt_ofs_cls.l1_offset, 3 * s->cluster_size);
- cpu_to_be64w(&l1_ofs_rt_ofs_cls.reftable_offset, s->cluster_size);
- cpu_to_be32w(&l1_ofs_rt_ofs_cls.reftable_clusters, 1);
- ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader, l1_table_offset),
+ l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size);
+ l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size);
+ l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1);
+ ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset),
&l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls));
if (ret < 0) {
goto fail_broken_refcounts;
/* Enter the first refblock into the reftable */
rt_entry = cpu_to_be64(2 * s->cluster_size);
- ret = bdrv_pwrite_sync(bs->file->bs, s->cluster_size,
+ ret = bdrv_pwrite_sync(bs->file, s->cluster_size,
&rt_entry, sizeof(rt_entry));
if (ret < 0) {
goto fail_broken_refcounts;
int ret;
qemu_co_mutex_lock(&s->lock);
- ret = qcow2_cache_flush(bs, s->l2_table_cache);
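+ /* Flushing to the OS only requires writing the cached tables back to
+ * the image file; flushing bs->file itself is left to the generic
+ * block layer */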
+ ret = qcow2_cache_write(bs, s->l2_table_cache);
if (ret < 0) {
qemu_co_mutex_unlock(&s->lock);
return ret;
}
if (qcow2_need_accurate_refcounts(s)) {
- ret = qcow2_cache_flush(bs, s->refcount_block_cache);
+ ret = qcow2_cache_write(bs, s->refcount_block_cache);
if (ret < 0) {
qemu_co_mutex_unlock(&s->lock);
return ret;
int64_t pos)
{
BDRVQcow2State *s = bs->opaque;
- int64_t total_sectors = bs->total_sectors;
- bool zero_beyond_eof = bs->zero_beyond_eof;
- int ret;
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
- bs->zero_beyond_eof = false;
- ret = bdrv_pwritev(bs, qcow2_vm_state_offset(s) + pos, qiov);
- bs->zero_beyond_eof = zero_beyond_eof;
-
- /* bdrv_co_do_writev will have increased the total_sectors value to include
- * the VM state - the VM state is however not an actual part of the block
- * device, therefore, we need to restore the old value. */
- bs->total_sectors = total_sectors;
-
- return ret;
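+ /* Bypass the block layer and call the driver directly: the VM state is
+ * stored past the end of the guest-visible image, so the usual request
+ * checks and total_sectors accounting must not apply */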
+ return bs->drv->bdrv_co_pwritev(bs, qcow2_vm_state_offset(s) + pos,
+ qiov->size, qiov, 0);
}
-static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf,
- int64_t pos, int size)
+static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
+ int64_t pos)
{
BDRVQcow2State *s = bs->opaque;
- bool zero_beyond_eof = bs->zero_beyond_eof;
- int ret;
BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
- bs->zero_beyond_eof = false;
- ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size);
- bs->zero_beyond_eof = zero_beyond_eof;
-
- return ret;
+ return bs->drv->bdrv_co_preadv(bs, qcow2_vm_state_offset(s) + pos,
+ qiov->size, qiov, 0);
}
/*
.bdrv_co_get_block_status = qcow2_co_get_block_status,
.bdrv_set_key = qcow2_set_key,
- .bdrv_co_readv = qcow2_co_readv,
- .bdrv_co_writev = qcow2_co_writev,
+ .bdrv_co_preadv = qcow2_co_preadv,
+ .bdrv_co_pwritev = qcow2_co_pwritev,
.bdrv_co_flush_to_os = qcow2_co_flush_to_os,
- .bdrv_co_write_zeroes = qcow2_co_write_zeroes,
+ .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes,
.bdrv_co_discard = qcow2_co_discard,
.bdrv_truncate = qcow2_truncate,
.bdrv_write_compressed = qcow2_write_compressed,