#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
+#include "qemu/memalign.h"
#include "trace.h"
int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
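/*
 * Zero the tail of the on-disk L1 table, i.e. the dropped entries
 * [new_l1_size, s->l1_size); the in-memory copy is cleared further down.
 */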
ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
- new_l1_size * sizeof(uint64_t),
- (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
+ new_l1_size * L1E_SIZE,
+ (s->l1_size - new_l1_size) * L1E_SIZE, 0);
if (ret < 0) {
goto fail;
}
* l1_table in memory to avoid possible image corruption.
*/
memset(s->l1_table + new_l1_size, 0,
- (s->l1_size - new_l1_size) * sizeof(uint64_t));
+ (s->l1_size - new_l1_size) * L1E_SIZE);
return ret;
}
/* Do a sanity check on min_size before trying to calculate new_l1_size
* (this prevents overflows during the while loop for the calculation of
* new_l1_size) */
- if (min_size > INT_MAX / sizeof(uint64_t)) {
+ if (min_size > INT_MAX / L1E_SIZE) {
return -EFBIG;
}
}
QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
- if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
+ if (new_l1_size > QCOW_MAX_L1_SIZE / L1E_SIZE) {
return -EFBIG;
}
s->l1_size, new_l1_size);
#endif
- new_l1_size2 = sizeof(uint64_t) * new_l1_size;
+ new_l1_size2 = L1E_SIZE * new_l1_size;
new_l1_table = qemu_try_blockalign(bs->file->bs, new_l1_size2);
if (new_l1_table == NULL) {
return -ENOMEM;
memset(new_l1_table, 0, new_l1_size2);
if (s->l1_size) {
- memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
+ memcpy(new_l1_table, s->l1_table, s->l1_size * L1E_SIZE);
}
/* write new table (align to cluster) */
s->l1_table = new_l1_table;
old_l1_size = s->l1_size;
s->l1_size = new_l1_size;
- qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
+ qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * L1E_SIZE,
QCOW2_DISCARD_OTHER);
return 0;
fail:
BDRVQcow2State *s = bs->opaque;
int l1_start_index;
int i, ret;
- int bufsize = MAX(sizeof(uint64_t),
+ int bufsize = MAX(L1E_SIZE,
MIN(bs->file->bs->bl.request_alignment, s->cluster_size));
- int nentries = bufsize / sizeof(uint64_t);
+ int nentries = bufsize / L1E_SIZE;
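+ /*
+ * Illustrative arithmetic (not part of the patch): with a 4096-byte
+ * request_alignment and 64 KiB clusters, bufsize = MAX(8, MIN(4096, 65536))
+ * = 4096 and nentries = 4096 / 8 = 512, so each update rewrites a block of
+ * 512 L1 entries, with l1_start_index aligned down to a multiple of 512.
+ */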
g_autofree uint64_t *buf = g_try_new0(uint64_t, nentries);
if (buf == NULL) {
}
ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
- s->l1_table_offset + 8 * l1_start_index, bufsize, false);
+ s->l1_table_offset + L1E_SIZE * l1_start_index, bufsize, false);
if (ret < 0) {
return ret;
}
BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
ret = bdrv_pwrite_sync(bs->file,
- s->l1_table_offset + 8 * l1_start_index,
+ s->l1_table_offset + L1E_SIZE * l1_start_index,
buf, bufsize);
if (ret < 0) {
return ret;
return -ENOMEDIUM;
}
- /* Call .bdrv_co_readv() directly instead of using the public block-layer
+ /*
+ * We never deal with requests that don't satisfy
+ * bdrv_check_qiov_request(), and aligning requests to clusters never
+ * breaks this condition. So, do some assertions before calling
+ * bs->drv->bdrv_co_preadv_part() which has int64_t arguments.
+ */
+ assert(src_cluster_offset <= INT64_MAX);
+ assert(src_cluster_offset + offset_in_cluster <= INT64_MAX);
+ /* Cast qiov->size to uint64_t to silence a compiler warning on -m32 */
+ assert((uint64_t)qiov->size <= INT64_MAX);
+ bdrv_check_qiov_request(src_cluster_offset + offset_in_cluster, qiov->size,
+ qiov, 0, &error_abort);
+ /*
+ * Call .bdrv_co_readv() directly instead of using the public block-layer
* interface. This avoids double I/O throttling and request tracking,
* which can lead to deadlock when block layer copy-on-read is enabled.
*/
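/*
 * A sketch of the shape of that direct call (assuming the underlying
 * driver provides the preadv_part hook; a sketch, not a verbatim quote
 * of the surrounding function):
 *
 *     ret = bs->drv->bdrv_co_preadv_part(bs,
 *                                        src_cluster_offset + offset_in_cluster,
 *                                        qiov->size, qiov, 0, 0);
 */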
* offset needs to be aligned to a cluster boundary.
*
* If the cluster is unallocated then *host_offset will be 0.
- * If the cluster is compressed then *host_offset will contain the
- * complete compressed cluster descriptor.
+ * If the cluster is compressed then *host_offset will contain the l2 entry.
*
* On entry, *bytes is the maximum number of contiguous bytes starting at
* offset that we are interested in.
ret = -EIO;
goto fail;
}
- *host_offset = l2_entry & L2E_COMPRESSED_OFFSET_SIZE_MASK;
+ *host_offset = l2_entry;
break;
case QCOW2_SUBCLUSTER_ZERO_PLAIN:
case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
set_l2_entry(s, l2_slice, l2_index, cluster_offset);
+ if (has_subclusters(s)) {
+ set_l2_bitmap(s, l2_slice, l2_index, 0);
+ }
qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
*host_offset = cluster_offset & s->cluster_offset_mask;
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
assert(l2_index + m->nb_clusters <= s->l2_slice_size);
+ assert(m->cow_end.offset + m->cow_end.nb_bytes <=
+ m->nb_clusters << s->cluster_bits);
for (i = 0; i < m->nb_clusters; i++) {
uint64_t offset = cluster_offset + ((uint64_t)i << s->cluster_bits);
/* if two concurrent writes happen to the same unallocated cluster
assert((offset & L2E_OFFSET_MASK) == offset);
set_l2_entry(s, l2_slice, l2_index + i, offset | QCOW_OFLAG_COPIED);
+
+ /* Update bitmap with the subclusters that were just written */
+ if (has_subclusters(s) && !m->prealloc) {
+ uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
+ unsigned written_from = m->cow_start.offset;
+ unsigned written_to = m->cow_end.offset + m->cow_end.nb_bytes;
+ int first_sc, last_sc;
+ /* Narrow written_from and written_to down to the current cluster */
+ written_from = MAX(written_from, i << s->cluster_bits);
+ written_to = MIN(written_to, (i + 1) << s->cluster_bits);
+ assert(written_from < written_to);
+ first_sc = offset_to_sc_index(s, written_from);
+ last_sc = offset_to_sc_index(s, written_to - 1);
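+ /*
+ * Worked example (illustrative): with 64 KiB clusters split into 32
+ * subclusters of 2 KiB, a write covering bytes [3000, 10000) of this
+ * cluster gives first_sc = 3000 / 2048 = 1 and last_sc = 9999 / 2048 = 4,
+ * so the ALLOC bits of subclusters 1..4 are set below and their ZERO
+ * bits cleared.
+ */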
+ l2_bitmap |= QCOW_OFLAG_SUB_ALLOC_RANGE(first_sc, last_sc + 1);
+ l2_bitmap &= ~QCOW_OFLAG_SUB_ZERO_RANGE(first_sc, last_sc + 1);
+ set_l2_bitmap(s, l2_slice, l2_index + i, l2_bitmap);
+ }
}
*/
if (!m->keep_old_clusters && j != 0) {
for (i = 0; i < j; i++) {
- qcow2_free_any_clusters(bs, old_cluster[i], 1, QCOW2_DISCARD_NEVER);
+ qcow2_free_any_cluster(bs, old_cluster[i], QCOW2_DISCARD_NEVER);
}
}
if (l2_entry & QCOW_OFLAG_COPIED) {
return false;
}
+ /* fallthrough */
case QCOW2_CLUSTER_UNALLOCATED:
case QCOW2_CLUSTER_COMPRESSED:
case QCOW2_CLUSTER_ZERO_PLAIN:
if (end <= old_start || start >= old_end) {
/* No intersection */
+ continue;
+ }
+
+ if (old_alloc->keep_old_clusters &&
+ (end <= l2meta_cow_start(old_alloc) ||
+ start >= l2meta_cow_end(old_alloc)))
+ {
+ /*
+ * The clusters intersect but the COW areas don't, and the cluster
+ * itself is already allocated, so there is no actual conflict.
+ */
+ continue;
+ }
+
+ /* Conflict */
+
+ if (start < old_start) {
+ /* Stop at the start of a running allocation */
+ bytes = old_start - start;
} else {
- if (start < old_start) {
- /* Stop at the start of a running allocation */
- bytes = old_start - start;
- } else {
- bytes = 0;
- }
+ bytes = 0;
+ }
- /* Stop if already an l2meta exists. After yielding, it wouldn't
- * be valid any more, so we'd have to clean up the old L2Metas
- * and deal with requests depending on them before starting to
- * gather new ones. Not worth the trouble. */
- if (bytes == 0 && *m) {
- *cur_bytes = 0;
- return 0;
- }
+ /*
+ * Stop if an l2meta already exists. After yielding, it wouldn't
+ * be valid any more, so we'd have to clean up the old L2Metas
+ * and deal with requests depending on them before starting to
+ * gather new ones. Not worth the trouble.
+ */
+ if (bytes == 0 && *m) {
+ *cur_bytes = 0;
+ return 0;
+ }
- if (bytes == 0) {
- /* Wait for the dependency to complete. We need to recheck
- * the free/allocated clusters when we continue. */
- qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
- return -EAGAIN;
- }
+ if (bytes == 0) {
+ /*
+ * Wait for the dependency to complete. We need to recheck
+ * the free/allocated clusters when we continue.
+ */
+ qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
+ return -EAGAIN;
}
}
out:
qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
- if (ret < 0 && *m && (*m)->nb_clusters > 0) {
- QLIST_REMOVE(*m, next_in_flight);
- }
return ret;
}
/*
- * alloc_cluster_offset
+ * For a given area on the virtual disk defined by @offset and @bytes,
+ * find the corresponding area on the qcow2 image, allocating new
+ * clusters (or subclusters) if necessary. The result can span a
+ * combination of allocated and previously unallocated clusters.
*
- * For a given offset on the virtual disk, find the cluster offset in qcow2
- * file. If the offset is not found, allocate a new cluster.
+ * Note that offset may not be cluster aligned. In this case, the returned
+ * *host_offset points to the exact byte referenced by offset and therefore
+ * isn't cluster aligned either.
*
- * If the cluster was already allocated, m->nb_clusters is set to 0 and
- * other fields in m are meaningless.
+ * On return, @host_offset is set to the beginning of the requested
+ * area. This area is guaranteed to be contiguous in the qcow2 file
+ * but it can be smaller than initially requested. In this case @bytes
+ * is updated with the actual size.
*
- * If the cluster is newly allocated, m->nb_clusters is set to the number of
- * contiguous clusters that have been allocated. In this case, the other
- * fields of m are valid and contain information about the first allocated
- * cluster.
+ * If any clusters or subclusters were allocated then @m contains a
+ * list with the information of all the affected regions. Note that
+ * this can happen regardless of whether this function succeeds or
+ * not. The caller is responsible for updating the L2 metadata of the
+ * allocated clusters (on success) or freeing them (on failure), and
+ * for clearing the contents of @m afterwards in both cases.
*
* If the request conflicts with another write request in flight, the coroutine
* is queued and will be reentered when the dependency has completed.
*
* Return 0 on success and -errno in error cases
*/
-int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
- unsigned int *bytes, uint64_t *host_offset,
- QCowL2Meta **m)
+int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
+ unsigned int *bytes, uint64_t *host_offset,
+ QCowL2Meta **m)
{
BDRVQcow2State *s = bs->opaque;
uint64_t start, remaining;
while (true) {
if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
- *host_offset = start_of_cluster(s, cluster_offset);
+ *host_offset = cluster_offset;
}
assert(remaining >= cur_bytes);
*bytes -= remaining;
assert(*bytes > 0);
assert(*host_offset != INV_OFFSET);
+ assert(offset_into_cluster(s, *host_offset) ==
+ offset_into_cluster(s, offset));
return 0;
}
assert(nb_clusters <= INT_MAX);
for (i = 0; i < nb_clusters; i++) {
- uint64_t old_l2_entry;
-
- old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
+ uint64_t old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
+ uint64_t old_l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
+ uint64_t new_l2_entry = old_l2_entry;
+ uint64_t new_l2_bitmap = old_l2_bitmap;
+ QCow2ClusterType cluster_type =
+ qcow2_get_cluster_type(bs, old_l2_entry);
/*
+ * If full_discard is true, the cluster should not read back as zeroes,
+ * but rather fall through to the backing file.
+ *
* If full_discard is false, make sure that a discarded area reads back
* as zeroes for v3 images (we cannot do it for v2 without actually
* writing a zero-filled buffer). We can skip the operation if the
*
* TODO We might want to use bdrv_block_status(bs) here, but we're
* holding s->lock, so that doesn't work today.
- *
- * If full_discard is true, the sector should not read back as zeroes,
- * but rather fall through to the backing file.
*/
- switch (qcow2_get_cluster_type(bs, old_l2_entry)) {
- case QCOW2_CLUSTER_UNALLOCATED:
- if (full_discard || !bs->backing) {
- continue;
- }
- break;
-
- case QCOW2_CLUSTER_ZERO_PLAIN:
- if (!full_discard) {
- continue;
+ if (full_discard) {
+ new_l2_entry = new_l2_bitmap = 0;
+ } else if (bs->backing || qcow2_cluster_is_allocated(cluster_type)) {
+ if (has_subclusters(s)) {
+ new_l2_entry = 0;
+ new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES;
+ } else {
+ new_l2_entry = s->qcow_version >= 3 ? QCOW_OFLAG_ZERO : 0;
}
- break;
-
- case QCOW2_CLUSTER_ZERO_ALLOC:
- case QCOW2_CLUSTER_NORMAL:
- case QCOW2_CLUSTER_COMPRESSED:
- break;
+ }
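+ /*
+ * Resulting state, summarized (illustrative):
+ *   full_discard:                  entry = 0, bitmap = 0
+ *                                  (reads go to the backing file, if any)
+ *   else, backing file/allocated:
+ *     with subclusters:            entry = 0, bitmap = ALL_ZEROES
+ *     v3 without subclusters:      entry = QCOW_OFLAG_ZERO
+ *     v2:                          entry = 0 (zeroes cannot be guaranteed)
+ */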
- default:
- abort();
+ if (old_l2_entry == new_l2_entry && old_l2_bitmap == new_l2_bitmap) {
+ continue;
}
/* First remove L2 entries */
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
- if (!full_discard && s->qcow_version >= 3) {
- set_l2_entry(s, l2_slice, l2_index + i, QCOW_OFLAG_ZERO);
- } else {
- set_l2_entry(s, l2_slice, l2_index + i, 0);
+ set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry);
+ if (has_subclusters(s)) {
+ set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap);
}
-
/* Then decrease the refcount */
- qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
+ qcow2_free_any_cluster(bs, old_l2_entry, type);
}
qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
continue;
}
+ /* First update L2 entries */
qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
- if (unmap) {
- qcow2_free_any_clusters(bs, old_l2_entry, 1, QCOW2_DISCARD_REQUEST);
- }
set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry);
if (has_subclusters(s)) {
set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap);
}
+
+ /* Then decrease the refcount */
+ if (unmap) {
+ qcow2_free_any_cluster(bs, old_l2_entry, QCOW2_DISCARD_REQUEST);
+ }
}
qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
return nb_clusters;
}
-int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
- uint64_t bytes, int flags)
+static int zero_l2_subclusters(BlockDriverState *bs, uint64_t offset,
+ unsigned nb_subclusters)
+{
+ BDRVQcow2State *s = bs->opaque;
+ uint64_t *l2_slice;
+ uint64_t old_l2_bitmap, l2_bitmap;
+ int l2_index, ret, sc = offset_to_sc_index(s, offset);
+
+ /* For full clusters use zero_in_l2_slice() instead */
+ assert(nb_subclusters > 0 && nb_subclusters < s->subclusters_per_cluster);
+ assert(sc + nb_subclusters <= s->subclusters_per_cluster);
+ assert(offset_into_subcluster(s, offset) == 0);
+
+ ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
+ if (ret < 0) {
+ return ret;
+ }
+
+ switch (qcow2_get_cluster_type(bs, get_l2_entry(s, l2_slice, l2_index))) {
+ case QCOW2_CLUSTER_COMPRESSED:
+ ret = -ENOTSUP; /* We cannot partially zeroize compressed clusters */
+ goto out;
+ case QCOW2_CLUSTER_NORMAL:
+ case QCOW2_CLUSTER_UNALLOCATED:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ old_l2_bitmap = l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);
+
+ l2_bitmap |= QCOW_OFLAG_SUB_ZERO_RANGE(sc, sc + nb_subclusters);
+ l2_bitmap &= ~QCOW_OFLAG_SUB_ALLOC_RANGE(sc, sc + nb_subclusters);
+
+ if (old_l2_bitmap != l2_bitmap) {
+ set_l2_bitmap(s, l2_slice, l2_index, l2_bitmap);
+ qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
+ }
+
+ ret = 0;
+out:
+ qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
+
+ return ret;
+}
+
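+ /*
+ * Zero the area described by @offset and @bytes: a subcluster-aligned but
+ * cluster-unaligned head and tail are handled at subcluster granularity by
+ * zero_l2_subclusters(), while the whole clusters in between go through
+ * zero_in_l2_slice(); see the worked example at the head/tail computation
+ * below.
+ */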
+int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset,
+ uint64_t bytes, int flags)
{
BDRVQcow2State *s = bs->opaque;
uint64_t end_offset = offset + bytes;
uint64_t nb_clusters;
+ unsigned head, tail;
int64_t cleared;
int ret;
}
/* Caller must pass aligned values, except at image end */
- assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
- assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
+ assert(offset_into_subcluster(s, offset) == 0);
+ assert(offset_into_subcluster(s, end_offset) == 0 ||
end_offset >= bs->total_sectors << BDRV_SECTOR_BITS);
/*
return -ENOTSUP;
}
- /* Each L2 slice is handled by its own loop iteration */
- nb_clusters = size_to_clusters(s, bytes);
+ head = MIN(end_offset, ROUND_UP(offset, s->cluster_size)) - offset;
+ offset += head;
+
+ tail = (end_offset >= bs->total_sectors << BDRV_SECTOR_BITS) ? 0 :
+ end_offset - MAX(offset, start_of_cluster(s, end_offset));
+ end_offset -= tail;
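+ /*
+ * Worked example (illustrative, assuming 160 KiB is not the image end):
+ * with 64 KiB clusters, a request for [48 KiB, 160 KiB) gives
+ * head = 64K - 48K = 16 KiB (offset becomes 64 KiB) and
+ * tail = 160K - 128K = 32 KiB (end_offset becomes 128 KiB), so the
+ * subcluster path zeroes [48K, 64K) and [128K, 160K) while
+ * zero_in_l2_slice() handles the cluster at [64K, 128K).
+ */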
s->cache_discards = true;
+ if (head) {
+ ret = zero_l2_subclusters(bs, offset - head,
+ size_to_subclusters(s, head));
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
+ /* Each L2 slice is handled by its own loop iteration */
+ nb_clusters = size_to_clusters(s, end_offset - offset);
+
while (nb_clusters > 0) {
cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
if (cleared < 0) {
offset += (cleared * s->cluster_size);
}
+ if (tail) {
+ ret = zero_l2_subclusters(bs, end_offset, size_to_subclusters(s, tail));
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
ret = 0;
fail:
s->cache_discards = false;
int ret;
int i, j;
+ /* qcow2_downgrade() is not allowed in images with subclusters */
+ assert(!has_subclusters(s));
+
slice_size2 = s->l2_slice_size * l2_entry_size(s);
n_slices = s->cluster_size / slice_size2;
if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
if (!bs->backing) {
- /* not backed; therefore we can simply deallocate the
- * cluster */
+ /*
+ * not backed; therefore we can simply deallocate the
+ * cluster. No need to call set_l2_bitmap(); this
+ * function doesn't support images with subclusters.
+ */
set_l2_entry(s, l2_slice, j, 0);
l2_dirty = true;
continue;
} else {
set_l2_entry(s, l2_slice, j, offset);
}
+ /*
+ * No need to call set_l2_bitmap() after set_l2_entry() because
+ * this function doesn't support images with subclusters.
+ */
l2_dirty = true;
}
Error *local_err = NULL;
ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
- s->snapshots[i].l1_size, sizeof(uint64_t),
+ s->snapshots[i].l1_size, L1E_SIZE,
QCOW_MAX_L1_SIZE, "Snapshot L1 table",
&local_err);
if (ret < 0) {
goto fail;
}
- l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
+ l1_size2 = s->snapshots[i].l1_size * L1E_SIZE;
new_l1_table = g_try_realloc(l1_table, l1_size2);
if (!new_l1_table) {
g_free(l1_table);
return ret;
}
+
+void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
+ uint64_t *coffset, int *csize)
+{
+ BDRVQcow2State *s = bs->opaque;
+ int nb_csectors;
+
+ assert(qcow2_get_cluster_type(bs, l2_entry) == QCOW2_CLUSTER_COMPRESSED);
+
+ *coffset = l2_entry & s->cluster_offset_mask;
+
+ nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1;
+ *csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
+ (*coffset & (QCOW2_COMPRESSED_SECTOR_SIZE - 1));
+}
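+
+/*
+ * Worked example (illustrative): with the default 64 KiB clusters,
+ * s->csize_shift = 62 - (16 - 8) = 54 and s->csize_mask = 0xff, so
+ * *coffset is the low 54 bits of the entry and nb_csectors counts up to
+ * 256 sectors of QCOW2_COMPRESSED_SECTOR_SIZE (512) bytes; subtracting
+ * (*coffset & 511) trims the unused bytes of the first sector when the
+ * compressed data starts mid-sector.
+ */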