/*********************************************************/
/* refcount handling */
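+/* Cache the index of the last used (non-zero) entry of the refcount table
+ * in s->max_refcount_table_index. Assumes the table has at least one
+ * entry; if no entry is in use, the cached index ends up as 0. */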
+static void update_max_refcount_table_index(BDRVQcow2State *s)
+{
+ unsigned i = s->refcount_table_size - 1;
+ while (i > 0 && (s->refcount_table[i] & REFT_OFFSET_MASK) == 0) {
+ i--;
+ }
+ /* Set s->max_refcount_table_index to the index of the last used entry */
+ s->max_refcount_table_index = i;
+}
+
int qcow2_refcount_init(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
- ret = bdrv_pread(bs->file->bs, s->refcount_table_offset,
+ ret = bdrv_pread(bs->file, s->refcount_table_offset,
s->refcount_table, refcount_table_size2);
if (ret < 0) {
goto fail;
}
for(i = 0; i < s->refcount_table_size; i++)
be64_to_cpus(&s->refcount_table[i]);
+ update_max_refcount_table_index(s);
}
return 0;
fail:
if (refcount_table_index < s->refcount_table_size) {
uint64_t data64 = cpu_to_be64(new_block);
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
- ret = bdrv_pwrite_sync(bs->file->bs,
+ ret = bdrv_pwrite_sync(bs->file,
s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
&data64, sizeof(data64));
if (ret < 0) {
}
s->refcount_table[refcount_table_index] = new_block;
+ /* If there is a hole in s->refcount_table, refcount_table_index may
+  * already be smaller than s->max_refcount_table_index */
+ s->max_refcount_table_index =
+ MAX(s->max_refcount_table_index, refcount_table_index);
/* The new refcount block may be where the caller intended to put its
* data, so let it restart the search. */
/* Write refcount blocks to disk */
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
- ret = bdrv_pwrite_sync(bs->file->bs, meta_offset, new_blocks,
+ ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
blocks_clusters * s->cluster_size);
g_free(new_blocks);
new_blocks = NULL;
}
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
- ret = bdrv_pwrite_sync(bs->file->bs, table_offset, new_table,
+ ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
table_size * sizeof(uint64_t));
if (ret < 0) {
goto fail_table;
uint64_t d64;
uint32_t d32;
} data;
- cpu_to_be64w(&data.d64, table_offset);
- cpu_to_be32w(&data.d32, table_clusters);
+ data.d64 = cpu_to_be64(table_offset);
+ data.d32 = cpu_to_be32(table_clusters);
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
- ret = bdrv_pwrite_sync(bs->file->bs,
+ ret = bdrv_pwrite_sync(bs->file,
offsetof(QCowHeader, refcount_table_offset),
&data, sizeof(data));
if (ret < 0) {
s->refcount_table = new_table;
s->refcount_table_size = table_size;
s->refcount_table_offset = table_offset;
+ update_max_refcount_table_index(s);
/* Free old table. */
qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
/* Discard is optional, ignore the return value */
if (ret >= 0) {
- bdrv_discard(bs->file->bs,
- d->offset >> BDRV_SECTOR_BITS,
- d->bytes >> BDRV_SECTOR_BITS);
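+ /* bdrv_pdiscard() takes a byte-based offset and count */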
+ bdrv_pdiscard(bs->file->bs, d->offset, d->bytes);
}
g_free(d);
}
break;
case QCOW2_CLUSTER_NORMAL:
- case QCOW2_CLUSTER_ZERO:
- if (l2_entry & L2E_OFFSET_MASK) {
- if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
- qcow2_signal_corruption(bs, false, -1, -1,
- "Cannot free unaligned cluster %#llx",
- l2_entry & L2E_OFFSET_MASK);
- } else {
- qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
- nb_clusters << s->cluster_bits, type);
- }
+ case QCOW2_CLUSTER_ZERO_ALLOC:
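+ /* both cluster types carry a host cluster allocation to free */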
+ if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
+ qcow2_signal_corruption(bs, false, -1, -1,
+ "Cannot free unaligned cluster %#llx",
+ l2_entry & L2E_OFFSET_MASK);
+ } else {
+ qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
+ nb_clusters << s->cluster_bits, type);
}
break;
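+ /* neither cluster type references a host cluster; nothing to free */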
+ case QCOW2_CLUSTER_ZERO_PLAIN:
case QCOW2_CLUSTER_UNALLOCATED:
break;
default:
int64_t l1_table_offset, int l1_size, int addend)
{
BDRVQcow2State *s = bs->opaque;
- uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, refcount;
+ uint64_t *l1_table, *l2_table, l2_offset, entry, l1_size2, refcount;
bool l1_allocated = false;
- int64_t old_offset, old_l2_offset;
+ int64_t old_entry, old_l2_offset;
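+ /* 'entry' carries the full L2 entry including its flag bits; the host
+  * cluster offset is masked out of it in the loop below */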
int i, j, l1_modified = 0, nb_csectors;
int ret;
}
l1_allocated = true;
- ret = bdrv_pread(bs->file->bs, l1_table_offset, l1_table, l1_size2);
+ ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
if (ret < 0) {
goto fail;
}
- for(i = 0;i < l1_size; i++)
+ for (i = 0; i < l1_size; i++) {
be64_to_cpus(&l1_table[i]);
+ }
} else {
assert(l1_size == s->l1_size);
l1_table = s->l1_table;
l1_allocated = false;
}
- for(i = 0; i < l1_size; i++) {
+ for (i = 0; i < l1_size; i++) {
l2_offset = l1_table[i];
if (l2_offset) {
old_l2_offset = l2_offset;
goto fail;
}
- for(j = 0; j < s->l2_size; j++) {
+ for (j = 0; j < s->l2_size; j++) {
uint64_t cluster_index;
-
- offset = be64_to_cpu(l2_table[j]);
- old_offset = offset;
- offset &= ~QCOW_OFLAG_COPIED;
-
- switch (qcow2_get_cluster_type(offset)) {
- case QCOW2_CLUSTER_COMPRESSED:
- nb_csectors = ((offset >> s->csize_shift) &
- s->csize_mask) + 1;
- if (addend != 0) {
- ret = update_refcount(bs,
- (offset & s->cluster_offset_mask) & ~511,
+ uint64_t offset;
+
+ entry = be64_to_cpu(l2_table[j]);
+ old_entry = entry;
+ entry &= ~QCOW_OFLAG_COPIED;
+ offset = entry & L2E_OFFSET_MASK;
+
+ switch (qcow2_get_cluster_type(entry)) {
+ case QCOW2_CLUSTER_COMPRESSED:
+ nb_csectors = ((entry >> s->csize_shift) &
+ s->csize_mask) + 1;
+ if (addend != 0) {
+ ret = update_refcount(bs,
+ (entry & s->cluster_offset_mask) & ~511,
nb_csectors * 512, abs(addend), addend < 0,
QCOW2_DISCARD_SNAPSHOT);
- if (ret < 0) {
- goto fail;
- }
- }
- /* compressed clusters are never modified */
- refcount = 2;
- break;
-
- case QCOW2_CLUSTER_NORMAL:
- case QCOW2_CLUSTER_ZERO:
- if (offset_into_cluster(s, offset & L2E_OFFSET_MASK)) {
- qcow2_signal_corruption(bs, true, -1, -1, "Data "
- "cluster offset %#llx "
- "unaligned (L2 offset: %#"
- PRIx64 ", L2 index: %#x)",
- offset & L2E_OFFSET_MASK,
- l2_offset, j);
- ret = -EIO;
+ if (ret < 0) {
goto fail;
}
+ }
+ /* compressed clusters are never modified */
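+ /* a value != 1 prevents QCOW_OFLAG_COPIED from being set below */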
+ refcount = 2;
+ break;
+
+ case QCOW2_CLUSTER_NORMAL:
+ case QCOW2_CLUSTER_ZERO_ALLOC:
+ if (offset_into_cluster(s, offset)) {
+ qcow2_signal_corruption(bs, true, -1, -1, "Cluster "
+ "allocation offset %#" PRIx64
+ " unaligned (L2 offset: %#"
+ PRIx64 ", L2 index: %#x)",
+ offset, l2_offset, j);
+ ret = -EIO;
+ goto fail;
+ }
- cluster_index = (offset & L2E_OFFSET_MASK) >> s->cluster_bits;
- if (!cluster_index) {
- /* unallocated */
- refcount = 0;
- break;
- }
- if (addend != 0) {
- ret = qcow2_update_cluster_refcount(bs,
+ cluster_index = offset >> s->cluster_bits;
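+ /* NORMAL and ZERO_ALLOC entries always have a non-zero host offset */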
+ assert(cluster_index);
+ if (addend != 0) {
+ ret = qcow2_update_cluster_refcount(bs,
cluster_index, abs(addend), addend < 0,
QCOW2_DISCARD_SNAPSHOT);
- if (ret < 0) {
- goto fail;
- }
- }
-
- ret = qcow2_get_refcount(bs, cluster_index, &refcount);
if (ret < 0) {
goto fail;
}
- break;
+ }
+
+ ret = qcow2_get_refcount(bs, cluster_index, &refcount);
+ if (ret < 0) {
+ goto fail;
+ }
+ break;
- case QCOW2_CLUSTER_UNALLOCATED:
- refcount = 0;
- break;
+ case QCOW2_CLUSTER_ZERO_PLAIN:
+ case QCOW2_CLUSTER_UNALLOCATED:
+ refcount = 0;
+ break;
- default:
- abort();
+ default:
+ abort();
}
if (refcount == 1) {
- offset |= QCOW_OFLAG_COPIED;
+ entry |= QCOW_OFLAG_COPIED;
}
- if (offset != old_offset) {
+ if (entry != old_entry) {
if (addend > 0) {
qcow2_cache_set_dependency(bs, s->l2_table_cache,
s->refcount_block_cache);
}
- l2_table[j] = cpu_to_be64(offset);
+ l2_table[j] = cpu_to_be64(entry);
qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache,
l2_table);
}
cpu_to_be64s(&l1_table[i]);
}
- ret = bdrv_pwrite_sync(bs->file->bs, l1_table_offset,
+ ret = bdrv_pwrite_sync(bs->file, l1_table_offset,
l1_table, l1_size2);
for (i = 0; i < l1_size; i++) {
*
* Modifies the number of errors in res.
*/
-static int inc_refcounts(BlockDriverState *bs,
- BdrvCheckResult *res,
- void **refcount_table,
- int64_t *refcount_table_size,
- int64_t offset, int64_t size)
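+/* "imrt" stands for the in-memory refcount table built up during the check */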
+int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
+ void **refcount_table,
+ int64_t *refcount_table_size,
+ int64_t offset, int64_t size)
{
BDRVQcow2State *s = bs->opaque;
uint64_t start, last, cluster_offset, k, refcount;
l2_size = s->l2_size * sizeof(uint64_t);
l2_table = g_malloc(l2_size);
- ret = bdrv_pread(bs->file->bs, l2_offset, l2_table, l2_size);
+ ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
res->check_errors++;
nb_csectors = ((l2_entry >> s->csize_shift) &
s->csize_mask) + 1;
l2_entry &= s->cluster_offset_mask;
- ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
- l2_entry & ~511, nb_csectors * 512);
+ ret = qcow2_inc_refcounts_imrt(bs, res,
+ refcount_table, refcount_table_size,
+ l2_entry & ~511, nb_csectors * 512);
if (ret < 0) {
goto fail;
}
}
break;
- case QCOW2_CLUSTER_ZERO:
- if ((l2_entry & L2E_OFFSET_MASK) == 0) {
- break;
- }
- /* fall through */
-
+ case QCOW2_CLUSTER_ZERO_ALLOC:
case QCOW2_CLUSTER_NORMAL:
{
uint64_t offset = l2_entry & L2E_OFFSET_MASK;
}
/* Mark cluster as used */
- ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
- offset, s->cluster_size);
+ ret = qcow2_inc_refcounts_imrt(bs, res,
+ refcount_table, refcount_table_size,
+ offset, s->cluster_size);
if (ret < 0) {
goto fail;
}
break;
}
+ case QCOW2_CLUSTER_ZERO_PLAIN:
case QCOW2_CLUSTER_UNALLOCATED:
break;
l1_size2 = l1_size * sizeof(uint64_t);
/* Mark L1 table as used */
- ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
- l1_table_offset, l1_size2);
+ ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, refcount_table_size,
+ l1_table_offset, l1_size2);
if (ret < 0) {
goto fail;
}
res->check_errors++;
goto fail;
}
- ret = bdrv_pread(bs->file->bs, l1_table_offset, l1_table, l1_size2);
+ ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
res->check_errors++;
if (l2_offset) {
/* Mark L2 table as used */
l2_offset &= L1E_OFFSET_MASK;
- ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
- l2_offset, s->cluster_size);
+ ret = qcow2_inc_refcounts_imrt(bs, res,
+ refcount_table, refcount_table_size,
+ l2_offset, s->cluster_size);
if (ret < 0) {
goto fail;
}
}
}
- ret = bdrv_pread(bs->file->bs, l2_offset, l2_table,
+ ret = bdrv_pread(bs->file, l2_offset, l2_table,
s->l2_size * sizeof(uint64_t));
if (ret < 0) {
fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
for (j = 0; j < s->l2_size; j++) {
uint64_t l2_entry = be64_to_cpu(l2_table[j]);
uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
- int cluster_type = qcow2_get_cluster_type(l2_entry);
+ QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);
- if ((cluster_type == QCOW2_CLUSTER_NORMAL) ||
- ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) {
+ if (cluster_type == QCOW2_CLUSTER_NORMAL ||
+ cluster_type == QCOW2_CLUSTER_ZERO_ALLOC) {
ret = qcow2_get_refcount(bs,
data_offset >> s->cluster_bits,
&refcount);
goto fail;
}
- ret = bdrv_pwrite(bs->file->bs, l2_offset, l2_table,
+ ret = bdrv_pwrite(bs->file, l2_offset, l2_table,
s->cluster_size);
if (ret < 0) {
fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
if (fix & BDRV_FIX_ERRORS) {
int64_t new_nb_clusters;
+ Error *local_err = NULL;
if (offset > INT64_MAX - s->cluster_size) {
ret = -EINVAL;
goto resize_fail;
}
- ret = bdrv_truncate(bs->file->bs, offset + s->cluster_size);
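+ /* grow the file to cover this cluster, without preallocation */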
+ ret = bdrv_truncate(bs->file, offset + s->cluster_size,
+ PREALLOC_MODE_OFF, &local_err);
if (ret < 0) {
+ error_report_err(local_err);
goto resize_fail;
}
size = bdrv_getlength(bs->file->bs);
}
res->corruptions_fixed++;
- ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
- offset, s->cluster_size);
+ ret = qcow2_inc_refcounts_imrt(bs, res,
+ refcount_table, nb_clusters,
+ offset, s->cluster_size);
if (ret < 0) {
return ret;
}
/* No need to check whether the refcount is now greater than 1:
* This area was just allocated and zeroed, so it can only be
- * exactly 1 after inc_refcounts() */
+ * exactly 1 after qcow2_inc_refcounts_imrt() */
continue;
resize_fail:
}
if (offset != 0) {
- ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
- offset, s->cluster_size);
+ ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
+ offset, s->cluster_size);
if (ret < 0) {
return ret;
}
}
/* header */
- ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
- 0, s->cluster_size);
+ ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
+ 0, s->cluster_size);
if (ret < 0) {
return ret;
}
return ret;
}
}
- ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
- s->snapshots_offset, s->snapshots_size);
+ ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
+ s->snapshots_offset, s->snapshots_size);
if (ret < 0) {
return ret;
}
/* refcount data */
- ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
- s->refcount_table_offset,
- s->refcount_table_size * sizeof(uint64_t));
+ ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
+ s->refcount_table_offset,
+ s->refcount_table_size * sizeof(uint64_t));
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* encryption: clusters holding the in-image (LUKS) crypto header */
+ if (s->crypto_header.length) {
+ ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
+ s->crypto_header.offset,
+ s->crypto_header.length);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ /* bitmaps: clusters used by the persistent dirty bitmaps extension */
+ ret = qcow2_check_bitmaps_refcounts(bs, res, refcount_table, nb_clusters);
if (ret < 0) {
return ret;
}
on_disk_refblock = (void *)((char *) *refcount_table +
refblock_index * s->cluster_size);
- ret = bdrv_write(bs->file->bs, refblock_offset / BDRV_SECTOR_SIZE,
+ ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE,
on_disk_refblock, s->cluster_sectors);
if (ret < 0) {
fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
}
assert(reftable_size < INT_MAX / sizeof(uint64_t));
- ret = bdrv_pwrite(bs->file->bs, reftable_offset, on_disk_reftable,
+ ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
reftable_size * sizeof(uint64_t));
if (ret < 0) {
fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
}
/* Enter new reftable into the image header */
- cpu_to_be64w(&reftable_offset_and_clusters.reftable_offset,
- reftable_offset);
- cpu_to_be32w(&reftable_offset_and_clusters.reftable_clusters,
- size_to_clusters(s, reftable_size * sizeof(uint64_t)));
- ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader,
- refcount_table_offset),
+ reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset);
+ reftable_offset_and_clusters.reftable_clusters =
+ cpu_to_be32(size_to_clusters(s, reftable_size * sizeof(uint64_t)));
+ ret = bdrv_pwrite_sync(bs->file,
+ offsetof(QCowHeader, refcount_table_offset),
&reftable_offset_and_clusters,
sizeof(reftable_offset_and_clusters));
if (ret < 0) {
s->refcount_table = on_disk_reftable;
s->refcount_table_offset = reftable_offset;
s->refcount_table_size = reftable_size;
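+ /* the rebuilt table may end in unused entries; refresh the cached index */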
+ update_max_refcount_table_index(s);
return 0;
}
if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
- for (i = 0; i < s->refcount_table_size; i++) {
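+ /* entries past max_refcount_table_index are known to be unused,
+  * so the scan can stop there instead of at the end of the table */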
+ unsigned last_entry = s->max_refcount_table_index;
+ assert(last_entry < s->refcount_table_size);
+ assert(last_entry + 1 == s->refcount_table_size ||
+ (s->refcount_table[last_entry + 1] & REFT_OFFSET_MASK) == 0);
+ for (i = 0; i <= last_entry; i++) {
if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
s->cluster_size)) {
return -ENOMEM;
}
- ret = bdrv_pread(bs->file->bs, l1_ofs, l1, l1_sz2);
+ ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2);
if (ret < 0) {
g_free(l1);
return ret;
return ret;
}
- ret = bdrv_pwrite(bs->file->bs, offset, refblock, s->cluster_size);
+ ret = bdrv_pwrite(bs->file, offset, refblock, s->cluster_size);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to write refblock");
return ret;
cpu_to_be64s(&new_reftable[i]);
}
- ret = bdrv_pwrite(bs->file->bs, new_reftable_offset, new_reftable,
+ ret = bdrv_pwrite(bs->file, new_reftable_offset, new_reftable,
new_reftable_size * sizeof(uint64_t));
for (i = 0; i < new_reftable_size; i++) {
/* Now update the rest of the in-memory information */
old_reftable = s->refcount_table;
s->refcount_table = new_reftable;
+ update_max_refcount_table_index(s);
s->refcount_bits = 1 << refcount_order;
s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);