#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
-#include "block/qcow2.h"
+#include "qcow2.h"
#include "qemu/range.h"
#include "qemu/bswap.h"
#include "qemu/cutils.h"
-static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
+static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size,
+ uint64_t max);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
int64_t offset, int64_t length, uint64_t addend,
bool decrease, enum qcow2_discard_type type);
block_index = cluster_index & (s->refcount_block_size - 1);
*refcount = s->get_refcount(refcount_block, block_index);
- qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
+ qcow2_cache_put(s->refcount_block_cache, &refcount_block);
return 0;
}
}
/* Allocate the refcount block itself and mark it as used */
- int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
+ int64_t new_block = alloc_clusters_noref(bs, s->cluster_size, INT64_MAX);
if (new_block < 0) {
return new_block;
}
+ /* The offset must fit in the offset field of the refcount table entry */
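+ /* (the low nine bits of a refcount table entry are reserved, so in
+ * practice this asserts that new_block is 512-byte aligned) */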
+ assert((new_block & REFT_OFFSET_MASK) == new_block);
+
/* If we're allocating the block at offset 0 then something is wrong */
if (new_block == 0) {
qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
/* Now the new refcount block needs to be written to disk */
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
- qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, *refcount_block);
+ qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block);
ret = qcow2_cache_flush(bs, s->refcount_block_cache);
if (ret < 0) {
goto fail;
return -EAGAIN;
}
- qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);
+ qcow2_cache_put(s->refcount_block_cache, refcount_block);
/*
* If we come here, we need to grow the refcount table. Again, a new
fail:
if (*refcount_block != NULL) {
- qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);
+ qcow2_cache_put(s->refcount_block_cache, refcount_block);
}
return ret;
}
goto fail;
}
memset(refblock_data, 0, s->cluster_size);
- qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache,
+ qcow2_cache_entry_mark_dirty(s->refcount_block_cache,
refblock_data);
new_table[i] = block_offset;
s->set_refcount(refblock_data, j, 1);
}
- qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache,
+ qcow2_cache_entry_mark_dirty(s->refcount_block_cache,
refblock_data);
}
- qcow2_cache_put(bs, s->refcount_block_cache, &refblock_data);
+ qcow2_cache_put(s->refcount_block_cache, &refblock_data);
}
assert(block_offset == table_offset);
/* Discard is optional, ignore the return value */
if (ret >= 0) {
- bdrv_pdiscard(bs->file->bs, d->offset, d->bytes);
+ bdrv_pdiscard(bs->file, d->offset, d->bytes);
}
g_free(d);
/* Load the refcount block and allocate it if needed */
if (table_index != old_table_index) {
if (refcount_block) {
- qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
+ qcow2_cache_put(s->refcount_block_cache, &refcount_block);
}
ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
+ /* If the caller needs to restart the search for free clusters,
+ * try the same ones first to see if they're still free. */
+ if (ret == -EAGAIN) {
+ if (s->free_cluster_index > (start >> s->cluster_bits)) {
+ s->free_cluster_index = (start >> s->cluster_bits);
+ }
+ }
if (ret < 0) {
goto fail;
}
}
old_table_index = table_index;
- qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache,
- refcount_block);
+ qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block);
/* we can update the count and save it */
block_index = cluster_index & (s->refcount_block_size - 1);
if (refcount == 0) {
void *table;
- table = qcow2_cache_is_table_offset(bs, s->refcount_block_cache,
+ table = qcow2_cache_is_table_offset(s->refcount_block_cache,
offset);
if (table != NULL) {
- qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
- qcow2_cache_discard(bs, s->refcount_block_cache, table);
+ qcow2_cache_put(s->refcount_block_cache, &refcount_block);
+ qcow2_cache_discard(s->refcount_block_cache, table);
}
- table = qcow2_cache_is_table_offset(bs, s->l2_table_cache, offset);
+ table = qcow2_cache_is_table_offset(s->l2_table_cache, offset);
if (table != NULL) {
- qcow2_cache_discard(bs, s->l2_table_cache, table);
+ qcow2_cache_discard(s->l2_table_cache, table);
}
if (s->discard_passthrough[type]) {
/* Write last changed block to disk */
if (refcount_block) {
- qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
+ qcow2_cache_put(s->refcount_block_cache, &refcount_block);
}
/*
/* return < 0 if error */
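+/* @max is the highest acceptable host offset for the allocation; if the
+ * search would go past it, -EFBIG is returned instead */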
-static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
+static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size,
+ uint64_t max)
{
BDRVQcow2State *s = bs->opaque;
uint64_t i, nb_clusters, refcount;
}
/* Make sure that all offsets in the "allocated" range are representable
- * in an int64_t */
+ * in the requested max */
if (s->free_cluster_index > 0 &&
- s->free_cluster_index - 1 > (INT64_MAX >> s->cluster_bits))
+ s->free_cluster_index - 1 > (max >> s->cluster_bits))
{
return -EFBIG;
}
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
do {
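+ /* never allocate past QCOW_MAX_CLUSTER_OFFSET; larger host offsets
+ * could not be represented in the image's metadata */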
- offset = alloc_clusters_noref(bs, size);
+ offset = alloc_clusters_noref(bs, size, QCOW_MAX_CLUSTER_OFFSET);
if (offset < 0) {
return offset;
}
free_in_cluster = s->cluster_size - offset_into_cluster(s, offset);
do {
if (!offset || free_in_cluster < size) {
- int64_t new_cluster = alloc_clusters_noref(bs, s->cluster_size);
+ int64_t new_cluster;
+
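+ /* compressed cluster descriptors store the host offset in fewer
+ * bits, so stay below s->cluster_offset_mask as well */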
+ new_cluster = alloc_clusters_noref(bs, s->cluster_size,
+ MIN(s->cluster_offset_mask,
+ QCOW_MAX_CLUSTER_OFFSET));
if (new_cluster < 0) {
return new_cluster;
}
int nb_clusters, enum qcow2_discard_type type)
{
BDRVQcow2State *s = bs->opaque;
+ QCow2ClusterType ctype = qcow2_get_cluster_type(bs, l2_entry);
+
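+ /* With an external data file, data clusters are not refcounted in
+ * the qcow2 image; just forward the discard to the data file when
+ * it is enabled for this discard type, then return */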
+ if (has_data_file(bs)) {
+ if (s->discard_passthrough[type] &&
+ (ctype == QCOW2_CLUSTER_NORMAL ||
+ ctype == QCOW2_CLUSTER_ZERO_ALLOC))
+ {
+ bdrv_pdiscard(s->data_file, l2_entry & L2E_OFFSET_MASK,
+ nb_clusters << s->cluster_bits);
+ }
+ return;
+ }
- switch (qcow2_get_cluster_type(l2_entry)) {
+ switch (ctype) {
case QCOW2_CLUSTER_COMPRESSED:
{
int nb_csectors;
}
}
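+/* Write back all dirty cache entries: the L2 table cache, and the refcount
+ * block cache as well if refcounts are tracked accurately. Does not flush
+ * bs->file itself. */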
+int coroutine_fn qcow2_write_caches(BlockDriverState *bs)
+{
+ BDRVQcow2State *s = bs->opaque;
+ int ret;
+
+ ret = qcow2_cache_write(bs, s->l2_table_cache);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (qcow2_need_accurate_refcounts(s)) {
+ ret = qcow2_cache_write(bs, s->refcount_block_cache);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
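+/* Like qcow2_write_caches(), but also flush the image file so that the
+ * metadata writes reach stable storage. */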
+int coroutine_fn qcow2_flush_caches(BlockDriverState *bs)
+{
+ int ret = qcow2_write_caches(bs);
+ if (ret < 0) {
+ return ret;
+ }
+ return bdrv_flush(bs->file->bs);
+}
/*********************************************************/
/* snapshots and image creation */
int64_t l1_table_offset, int l1_size, int addend)
{
BDRVQcow2State *s = bs->opaque;
- uint64_t *l1_table, *l2_table, l2_offset, entry, l1_size2, refcount;
+ uint64_t *l1_table, *l2_slice, l2_offset, entry, l1_size2, refcount;
bool l1_allocated = false;
int64_t old_entry, old_l2_offset;
+ unsigned slice, slice_size2, n_slices;
int i, j, l1_modified = 0, nb_csectors;
int ret;
assert(addend >= -1 && addend <= 1);
- l2_table = NULL;
+ l2_slice = NULL;
l1_table = NULL;
l1_size2 = l1_size * sizeof(uint64_t);
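+ /* Walk each L2 table in slices so that only one slice at a time has
+ * to be resident in the L2 table cache */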
+ slice_size2 = s->l2_slice_size * sizeof(uint64_t);
+ n_slices = s->cluster_size / slice_size2;
s->cache_discards = true;
* l1_table_offset when it is the current s->l1_table_offset! Be careful
* when changing this! */
if (l1_table_offset != s->l1_table_offset) {
- l1_table = g_try_malloc0(align_offset(l1_size2, 512));
+ l1_table = g_try_malloc0(ROUND_UP(l1_size2, 512));
if (l1_size2 && l1_table == NULL) {
ret = -ENOMEM;
goto fail;
goto fail;
}
- ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
- (void**) &l2_table);
- if (ret < 0) {
- goto fail;
- }
+ for (slice = 0; slice < n_slices; slice++) {
+ ret = qcow2_cache_get(bs, s->l2_table_cache,
+ l2_offset + slice * slice_size2,
+ (void **) &l2_slice);
+ if (ret < 0) {
+ goto fail;
+ }
- for (j = 0; j < s->l2_size; j++) {
- uint64_t cluster_index;
- uint64_t offset;
-
- entry = be64_to_cpu(l2_table[j]);
- old_entry = entry;
- entry &= ~QCOW_OFLAG_COPIED;
- offset = entry & L2E_OFFSET_MASK;
-
- switch (qcow2_get_cluster_type(entry)) {
- case QCOW2_CLUSTER_COMPRESSED:
- nb_csectors = ((entry >> s->csize_shift) &
- s->csize_mask) + 1;
- if (addend != 0) {
- ret = update_refcount(bs,
- (entry & s->cluster_offset_mask) & ~511,
+ for (j = 0; j < s->l2_slice_size; j++) {
+ uint64_t cluster_index;
+ uint64_t offset;
+
+ entry = be64_to_cpu(l2_slice[j]);
+ old_entry = entry;
+ entry &= ~QCOW_OFLAG_COPIED;
+ offset = entry & L2E_OFFSET_MASK;
+
+ switch (qcow2_get_cluster_type(bs, entry)) {
+ case QCOW2_CLUSTER_COMPRESSED:
+ nb_csectors = ((entry >> s->csize_shift) &
+ s->csize_mask) + 1;
+ if (addend != 0) {
+ ret = update_refcount(
+ bs, (entry & s->cluster_offset_mask) & ~511,
nb_csectors * 512, abs(addend), addend < 0,
QCOW2_DISCARD_SNAPSHOT);
- if (ret < 0) {
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+ /* compressed clusters are never modified */
+ refcount = 2;
+ break;
+
+ case QCOW2_CLUSTER_NORMAL:
+ case QCOW2_CLUSTER_ZERO_ALLOC:
+ if (offset_into_cluster(s, offset)) {
+ /* Here l2_index means table (not slice) index */
+ int l2_index = slice * s->l2_slice_size + j;
+ qcow2_signal_corruption(
+ bs, true, -1, -1, "Cluster "
+ "allocation offset %#" PRIx64
+ " unaligned (L2 offset: %#"
+ PRIx64 ", L2 index: %#x)",
+ offset, l2_offset, l2_index);
+ ret = -EIO;
goto fail;
}
- }
- /* compressed clusters are never modified */
- refcount = 2;
- break;
-
- case QCOW2_CLUSTER_NORMAL:
- case QCOW2_CLUSTER_ZERO_ALLOC:
- if (offset_into_cluster(s, offset)) {
- qcow2_signal_corruption(bs, true, -1, -1, "Cluster "
- "allocation offset %#" PRIx64
- " unaligned (L2 offset: %#"
- PRIx64 ", L2 index: %#x)",
- offset, l2_offset, j);
- ret = -EIO;
- goto fail;
- }
- cluster_index = offset >> s->cluster_bits;
- assert(cluster_index);
- if (addend != 0) {
- ret = qcow2_update_cluster_refcount(bs,
- cluster_index, abs(addend), addend < 0,
- QCOW2_DISCARD_SNAPSHOT);
+ cluster_index = offset >> s->cluster_bits;
+ assert(cluster_index);
+ if (addend != 0) {
+ ret = qcow2_update_cluster_refcount(
+ bs, cluster_index, abs(addend), addend < 0,
+ QCOW2_DISCARD_SNAPSHOT);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
+ ret = qcow2_get_refcount(bs, cluster_index, &refcount);
if (ret < 0) {
goto fail;
}
- }
+ break;
- ret = qcow2_get_refcount(bs, cluster_index, &refcount);
- if (ret < 0) {
- goto fail;
- }
- break;
-
- case QCOW2_CLUSTER_ZERO_PLAIN:
- case QCOW2_CLUSTER_UNALLOCATED:
- refcount = 0;
- break;
+ case QCOW2_CLUSTER_ZERO_PLAIN:
+ case QCOW2_CLUSTER_UNALLOCATED:
+ refcount = 0;
+ break;
- default:
- abort();
- }
+ default:
+ abort();
+ }
- if (refcount == 1) {
- entry |= QCOW_OFLAG_COPIED;
- }
- if (entry != old_entry) {
- if (addend > 0) {
- qcow2_cache_set_dependency(bs, s->l2_table_cache,
- s->refcount_block_cache);
+ if (refcount == 1) {
+ entry |= QCOW_OFLAG_COPIED;
+ }
+ if (entry != old_entry) {
+ if (addend > 0) {
+ qcow2_cache_set_dependency(bs, s->l2_table_cache,
+ s->refcount_block_cache);
+ }
+ l2_slice[j] = cpu_to_be64(entry);
+ qcow2_cache_entry_mark_dirty(s->l2_table_cache,
+ l2_slice);
}
- l2_table[j] = cpu_to_be64(entry);
- qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache,
- l2_table);
}
- }
- qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
+ qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
+ }
if (addend != 0) {
ret = qcow2_update_cluster_refcount(bs, l2_offset >>
ret = bdrv_flush(bs);
fail:
- if (l2_table) {
- qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
+ if (l2_slice) {
+ qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
}
s->cache_discards = false;
for(i = 0; i < s->l2_size; i++) {
l2_entry = be64_to_cpu(l2_table[i]);
- switch (qcow2_get_cluster_type(l2_entry)) {
+ switch (qcow2_get_cluster_type(bs, l2_entry)) {
case QCOW2_CLUSTER_COMPRESSED:
/* Compressed clusters don't have QCOW_OFLAG_COPIED */
if (l2_entry & QCOW_OFLAG_COPIED) {
- fprintf(stderr, "ERROR: cluster %" PRId64 ": "
+ fprintf(stderr, "ERROR: coffset=0x%" PRIx64 ": "
"copied flag must never be set for compressed "
- "clusters\n", l2_entry >> s->cluster_bits);
+ "clusters\n", l2_entry & s->cluster_offset_mask);
l2_entry &= ~QCOW_OFLAG_COPIED;
res->corruptions++;
}
/* Correct offsets are cluster aligned */
if (offset_into_cluster(s, offset)) {
- if (qcow2_get_cluster_type(l2_entry) ==
+ if (qcow2_get_cluster_type(bs, l2_entry) ==
QCOW2_CLUSTER_ZERO_ALLOC)
{
fprintf(stderr, "%s offset=%" PRIx64 ": Preallocated zero "
l2_table[i] = cpu_to_be64(l2_entry);
ret = qcow2_pre_write_overlap_check(bs,
QCOW2_OL_ACTIVE_L2 | QCOW2_OL_INACTIVE_L2,
- l2e_offset, sizeof(uint64_t));
+ l2e_offset, sizeof(uint64_t), false);
if (ret < 0) {
fprintf(stderr, "ERROR: Overlap check failed\n");
res->check_errors++;
int ret;
uint64_t refcount;
int i, j;
+ bool repair;
+
+ if (fix & BDRV_FIX_ERRORS) {
+ /* Always repair */
+ repair = true;
+ } else if (fix & BDRV_FIX_LEAKS) {
+ /* Repair only if that seems safe: This function is always
+ * called after the refcounts have been fixed, so the refcount
+ * is accurate if that repair was successful */
+ repair = !res->check_errors && !res->corruptions && !res->leaks;
+ } else {
+ repair = false;
+ }
for (i = 0; i < s->l1_size; i++) {
uint64_t l1_entry = s->l1_table[i];
if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
"l1_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
- fix & BDRV_FIX_ERRORS ? "Repairing" :
- "ERROR",
- i, l1_entry, refcount);
- if (fix & BDRV_FIX_ERRORS) {
+ repair ? "Repairing" : "ERROR", i, l1_entry, refcount);
+ if (repair) {
s->l1_table[i] = refcount == 1
? l1_entry | QCOW_OFLAG_COPIED
: l1_entry & ~QCOW_OFLAG_COPIED;
for (j = 0; j < s->l2_size; j++) {
uint64_t l2_entry = be64_to_cpu(l2_table[j]);
uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
- QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);
+ QCow2ClusterType cluster_type = qcow2_get_cluster_type(bs, l2_entry);
if (cluster_type == QCOW2_CLUSTER_NORMAL ||
cluster_type == QCOW2_CLUSTER_ZERO_ALLOC) {
if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
fprintf(stderr, "%s OFLAG_COPIED data cluster: "
"l2_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
- fix & BDRV_FIX_ERRORS ? "Repairing" :
- "ERROR",
- l2_entry, refcount);
- if (fix & BDRV_FIX_ERRORS) {
+ repair ? "Repairing" : "ERROR", l2_entry, refcount);
+ if (repair) {
l2_table[j] = cpu_to_be64(refcount == 1
? l2_entry | QCOW_OFLAG_COPIED
: l2_entry & ~QCOW_OFLAG_COPIED);
if (l2_dirty) {
ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
- l2_offset, s->cluster_size);
+ l2_offset, s->cluster_size,
+ false);
if (ret < 0) {
fprintf(stderr, "ERROR: Could not write L2 table; metadata "
"overlap check failed: %s\n", strerror(-ret));
/* snapshots */
for (i = 0; i < s->nb_snapshots; i++) {
sn = s->snapshots + i;
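+ /* Report obviously corrupted snapshot table entries instead of
+ * trying to use their L1 table location */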
+ if (offset_into_cluster(s, sn->l1_table_offset)) {
+ fprintf(stderr, "ERROR snapshot %s (%s) l1_offset=%#" PRIx64 ": "
+ "L1 table is not cluster aligned; snapshot table entry "
+ "corrupted\n", sn->id_str, sn->name, sn->l1_table_offset);
+ res->corruptions++;
+ continue;
+ }
+ if (sn->l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
+ fprintf(stderr, "ERROR snapshot %s (%s) l1_size=%#" PRIx32 ": "
+ "L1 table is too large; snapshot table entry corrupted\n",
+ sn->id_str, sn->name, sn->l1_size);
+ res->corruptions++;
+ continue;
+ }
ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
sn->l1_table_offset, sn->l1_size, 0, fix);
if (ret < 0) {
}
ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
- s->cluster_size);
+ s->cluster_size, false);
if (ret < 0) {
fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
goto fail;
}
ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
- reftable_size * sizeof(uint64_t));
+ reftable_size * sizeof(uint64_t),
+ false);
if (ret < 0) {
fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
goto fail;
}
/* align range to test to cluster boundaries */
- size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size);
+ size = ROUND_UP(offset_into_cluster(s, offset) + size, s->cluster_size);
offset = start_of_cluster(s, offset);
if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
uint32_t l1_sz = s->snapshots[i].l1_size;
uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
- uint64_t *l1 = g_try_malloc(l1_sz2);
+ uint64_t *l1;
int ret;
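+ /* Validate the inactive L1 table's location and size before reading
+ * it; a corrupted snapshot entry must not lead to an oversized
+ * allocation here */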
+ ret = qcow2_validate_table(bs, l1_ofs, l1_sz, sizeof(uint64_t),
+ QCOW_MAX_L1_SIZE, "", NULL);
+ if (ret < 0) {
+ return ret;
+ }
+
+ l1 = g_try_malloc(l1_sz2);
+
if (l1_sz2 && l1 == NULL) {
return -ENOMEM;
}
}
}
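+ /* The bitmap directory is only valid while the bitmaps autoclear
+ * flag is set, so only check for overlaps with it in that case */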
+ if ((chk & QCOW2_OL_BITMAP_DIRECTORY) &&
+ (s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS))
+ {
+ if (overlaps_with(s->bitmap_directory_offset,
+ s->bitmap_directory_size))
+ {
+ return QCOW2_OL_BITMAP_DIRECTORY;
+ }
+ }
+
return 0;
}
static const char *metadata_ol_names[] = {
[QCOW2_OL_MAIN_HEADER_BITNR] = "qcow2_header",
[QCOW2_OL_ACTIVE_L1_BITNR] = "active L1 table",
[QCOW2_OL_ACTIVE_L2_BITNR] = "active L2 table",
[QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table",
[QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block",
[QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table",
[QCOW2_OL_INACTIVE_L1_BITNR] = "inactive L1 table",
[QCOW2_OL_INACTIVE_L2_BITNR] = "inactive L2 table",
+ [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = "bitmap directory",
};
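+/* Fail the build if an overlap bit is missing its name above */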
+QEMU_BUILD_BUG_ON(QCOW2_OL_MAX_BITNR != ARRAY_SIZE(metadata_ol_names));
/*
* First performs a check for metadata overlaps (through
* overlaps; or a negative value (-errno) on error.
*/
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
- int64_t size)
+ int64_t size, bool data_file)
{
- int ret = qcow2_check_metadata_overlap(bs, ign, offset, size);
+ int ret;
+
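+ /* Writes to an external data file cannot overlap qcow2 metadata,
+ * which lives only in the qcow2 image file itself */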
+ if (data_file && has_data_file(bs)) {
+ return 0;
+ }
+ ret = qcow2_check_metadata_overlap(bs, ign, offset, size);
if (ret < 0) {
return ret;
} else if (ret > 0) {
if (reftable_index < *reftable_size && (*reftable)[reftable_index]) {
offset = (*reftable)[reftable_index];
- ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
+ ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size,
+ false);
if (ret < 0) {
error_setg_errno(errp, -ret, "Overlap check failed");
return ret;
new_reftable_size, new_refblock,
new_refblock_empty, allocated, errp);
if (ret < 0) {
- qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
+ qcow2_cache_put(s->refcount_block_cache, &refblock);
return ret;
}
if (new_refcount_bits < 64 && refcount >> new_refcount_bits) {
uint64_t offset;
- qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
+ qcow2_cache_put(s->refcount_block_cache, &refblock);
offset = ((reftable_index << s->refcount_block_bits)
+ refblock_index) << s->cluster_bits;
new_refblock_empty = new_refblock_empty && refcount == 0;
}
- qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
+ qcow2_cache_put(s->refcount_block_cache, &refblock);
} else {
/* No refblock means every refcount is 0 */
for (refblock_index = 0; refblock_index < s->refcount_block_size;
/* Write the new reftable */
ret = qcow2_pre_write_overlap_check(bs, 0, new_reftable_offset,
- new_reftable_size * sizeof(uint64_t));
+ new_reftable_size * sizeof(uint64_t),
+ false);
if (ret < 0) {
error_setg_errno(errp, -ret, "Overlap check failed");
goto done;
offset_to_reftable_index(s, discard_block_offs),
discard_block_offs,
s->get_refcount(refblock, block_index));
- qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
+ qcow2_cache_put(s->refcount_block_cache, &refblock);
return -EINVAL;
}
s->set_refcount(refblock, block_index, 0);
- qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, refblock);
+ qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refblock);
- qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
+ qcow2_cache_put(s->refcount_block_cache, &refblock);
if (cluster_index < s->free_cluster_index) {
s->free_cluster_index = cluster_index;
}
- refblock = qcow2_cache_is_table_offset(bs, s->refcount_block_cache,
+ refblock = qcow2_cache_is_table_offset(s->refcount_block_cache,
discard_block_offs);
if (refblock) {
/* discard refblock from the cache if refblock is cached */
- qcow2_cache_discard(bs, s->refcount_block_cache, refblock);
+ qcow2_cache_discard(s->refcount_block_cache, refblock);
}
update_refcount_discard(bs, discard_block_offs, s->cluster_size);
} else {
unused_block = buffer_is_zero(refblock, s->cluster_size);
}
- qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
+ qcow2_cache_put(s->refcount_block_cache, &refblock);
reftable_tmp[i] = unused_block ? 0 : cpu_to_be64(s->refcount_table[i]);
}