/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/range.h"
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend, enum qcow2_discard_type type);
/*********************************************************/
/* refcount handling */
int qcow2_refcount_init(BlockDriverState *bs)
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_size2, i;

    assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = g_try_malloc(refcount_table_size2);

    if (s->refcount_table_size > 0) {
        if (s->refcount_table == NULL) {
        BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
        ret = bdrv_pread(bs->file, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        for(i = 0; i < s->refcount_table_size; i++)
            be64_to_cpus(&s->refcount_table[i]);
void qcow2_refcount_close(BlockDriverState *bs)
    BDRVQcowState *s = bs->opaque;
    g_free(s->refcount_table);
static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset,
                               void **refcount_block)
    BDRVQcowState *s = bs->opaque;
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
 * Retrieves the refcount of the cluster given by its index and stores it in
 * *refcount. Returns 0 on success and -errno on failure.
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
    BDRVQcowState *s = bs->opaque;
    uint64_t refcount_table_index, block_index;
    int64_t refcount_block_offset;
    uint16_t *refcount_block;

    refcount_table_index = cluster_index >> s->refcount_block_bits;
    if (refcount_table_index >= s->refcount_table_size) {
    refcount_block_offset =
        s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
    if (!refcount_block_offset) {
    if (offset_into_cluster(s, refcount_block_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64
                                " unaligned (reftable index: %#" PRIx64 ")",
                                refcount_block_offset, refcount_table_index);
    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          (void**) &refcount_block);
    block_index = cluster_index & (s->refcount_block_size - 1);
    *refcount = be16_to_cpu(refcount_block[block_index]);
    ret = qcow2_cache_put(bs, s->refcount_block_cache,
                          (void**) &refcount_block);
 * Rounds the refcount table size up to avoid growing the table for each single
 * refcount block that is allocated.
static unsigned int next_refcount_table_size(BDRVQcowState *s,
                                             unsigned int min_size)
    unsigned int min_clusters = (min_size >> (s->cluster_bits - 3)) + 1;
    unsigned int refcount_table_clusters =
        MAX(1, s->refcount_table_size >> (s->cluster_bits - 3));
    while (min_clusters > refcount_table_clusters) {
        refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
    return refcount_table_clusters << (s->cluster_bits - 3);
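/* Added note (not in the original source): with the default 64 kB clusters a
 * reftable cluster holds 2^(cluster_bits - 3) = 8192 entries, and the
 * (n * 3 + 1) / 2 step above grows the table in cluster counts 1, 2, 3, 5, 8, ... */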
/* Checks if two offsets are described by the same refcount block */
static int in_same_refcount_block(BDRVQcowState *s, uint64_t offset_a,
    uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits);
    uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits);
    return (block_a == block_b);
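/* Added note: each refcount block covers refcount_block_size clusters, so two
 * image offsets share a refcount block exactly when they fall into the same
 * (cluster_size << refcount_block_bits)-byte window, as computed above. */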
 * Loads a refcount block. If it doesn't exist yet, it is allocated first
 * (including growing the refcount table if needed).
 * Returns 0 on success or -errno in error case
static int alloc_refcount_block(BlockDriverState *bs,
    int64_t cluster_index, uint16_t **refcount_block)
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_index;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Find the refcount block for the given cluster */
    refcount_table_index = cluster_index >> s->refcount_block_bits;

    if (refcount_table_index < s->refcount_table_size) {
        uint64_t refcount_block_offset =
            s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;

        /* If it's already there, we're done */
        if (refcount_block_offset) {
            if (offset_into_cluster(s, refcount_block_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: "
                                        "%#x)", refcount_block_offset,
                                        refcount_table_index);
            return load_refcount_block(bs, refcount_block_offset,
                                       (void**) refcount_block);
    /*
     * If we came here, we need to allocate something. Something is at least
     * a cluster for the new refcount block. It may also include a new refcount
     * table if the old refcount table is too small.
     * Note that allocating clusters here needs some special care:
     *
     * - We can't use the normal qcow2_alloc_clusters(), it would try to
     *   increase the refcount and very likely we would end up with an endless
     *   recursion. Instead we must place the refcount blocks in a way that
     *   they can describe themselves.
     *
     * - We need to consider that at this point we are inside update_refcount
     *   and potentially doing an initial refcount increase. This means that
     *   some clusters have already been allocated by the caller, but their
     *   refcount isn't accurate yet. If we allocate clusters for metadata, we
     *   need to return -EAGAIN to signal the caller that it needs to restart
     *   the search for free clusters.
     *
     * - alloc_clusters_noref and qcow2_free_clusters may load a different
     *   refcount block into the cache
     */
    *refcount_block = NULL;
    /* We write to the refcount table, so we might depend on L2 tables */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);

    /* Allocate the refcount block itself and mark it as used */
    int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
    fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
            refcount_table_index, cluster_index << s->cluster_bits, new_block);

    if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
        /* Zero the new refcount block before updating it */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    (void**) refcount_block);
        memset(*refcount_block, 0, s->cluster_size);

        /* The block describes itself, need to update the cache */
        int block_index = (new_block >> s->cluster_bits) &
            (s->refcount_block_size - 1);
        (*refcount_block)[block_index] = cpu_to_be16(1);

        /* Described somewhere else. This can recurse at most twice before we
         * arrive at a block that describes itself. */
        ret = update_refcount(bs, new_block, s->cluster_size, 1,
                              QCOW2_DISCARD_NEVER);
        ret = qcow2_cache_flush(bs, s->refcount_block_cache);

        /* Initialize the new refcount block only after updating its refcount,
         * update_refcount uses the refcount cache itself */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    (void**) refcount_block);
        memset(*refcount_block, 0, s->cluster_size);

    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);

    /* If the refcount table is big enough, just hook the block up there */
    if (refcount_table_index < s->refcount_table_size) {
        uint64_t data64 = cpu_to_be64(new_block);
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
        ret = bdrv_pwrite_sync(bs->file,
            s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
            &data64, sizeof(data64));

        s->refcount_table[refcount_table_index] = new_block;

        /* The new refcount block may be where the caller intended to put its
         * data, so let it restart the search. */
    ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
     * If we come here, we need to grow the refcount table. Again, a new
     * refcount table needs some space and we can't simply allocate to avoid
     * endless recursion.
     *
     * Therefore let's grab new refcount blocks at the end of the image, which
     * will describe themselves and the new refcount table. This way we can
     * reference them only in the new table and do the switch to the new
     * refcount table at once without producing an inconsistent state in
     * between.
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);

    /* Calculate the number of refcount blocks needed so far */
    uint64_t blocks_used = DIV_ROUND_UP(cluster_index, s->refcount_block_size);

    if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {

    /* And now we need at least one block more for the new metadata */
    uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
    uint64_t last_table_size;
    uint64_t blocks_clusters;
        uint64_t table_clusters =
            size_to_clusters(s, table_size * sizeof(uint64_t));
        blocks_clusters = 1 +
            ((table_clusters + s->refcount_block_size - 1)
            / s->refcount_block_size);
        uint64_t meta_clusters = table_clusters + blocks_clusters;

        last_table_size = table_size;
        table_size = next_refcount_table_size(s, blocks_used +
            ((meta_clusters + s->refcount_block_size - 1)
            / s->refcount_block_size));
    } while (last_table_size != table_size);
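    /* Added note: the loop above iterates to a fixed point because the number
     * of refcount blocks needed depends on the reftable size and vice versa;
     * it stops once another round no longer changes table_size. */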
    fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n",
            s->refcount_table_size, table_size);

    /* Create the new refcount table and blocks */
    uint64_t meta_offset = (blocks_used * s->refcount_block_size) *
    uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
    uint64_t *new_table = g_try_new0(uint64_t, table_size);
    uint16_t *new_blocks = g_try_malloc0(blocks_clusters * s->cluster_size);

    assert(table_size > 0 && blocks_clusters > 0);
    if (new_table == NULL || new_blocks == NULL) {

    /* Fill the new refcount table */
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    new_table[refcount_table_index] = new_block;

    for (i = 0; i < blocks_clusters; i++) {
        new_table[blocks_used + i] = meta_offset + (i * s->cluster_size);

    /* Fill the refcount blocks */
    uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t));
    for (i = 0; i < table_clusters + blocks_clusters; i++) {
        new_blocks[block++] = cpu_to_be16(1);

    /* Write refcount blocks to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
    ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
                           blocks_clusters * s->cluster_size);

    /* Write refcount table to disk */
    for(i = 0; i < table_size; i++) {
        cpu_to_be64s(&new_table[i]);

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
    ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
                           table_size * sizeof(uint64_t));

    for(i = 0; i < table_size; i++) {
        be64_to_cpus(&new_table[i]);

    /* Hook up the new refcount table in the qcow2 header */
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), table_clusters);
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset),

    /* And switch it in memory */
    uint64_t old_table_offset = s->refcount_table_offset;
    uint64_t old_table_size = s->refcount_table_size;

    g_free(s->refcount_table);
    s->refcount_table = new_table;
    s->refcount_table_size = table_size;
    s->refcount_table_offset = table_offset;

    /* Free old table. */
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);

    ret = load_refcount_block(bs, new_block, (void**) refcount_block);

    /* If we were trying to do the initial refcount update for some cluster
     * allocation, we might have used the same clusters to store newly
     * allocated metadata. Make the caller search some new space. */

    if (*refcount_block != NULL) {
        qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
void qcow2_process_discards(BlockDriverState *bs, int ret)
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *next;

    QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) {
        QTAILQ_REMOVE(&s->discards, d, next);

        /* Discard is optional, ignore the return value */
        bdrv_discard(bs->file,
                     d->offset >> BDRV_SECTOR_BITS,
                     d->bytes >> BDRV_SECTOR_BITS);
static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
               || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
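    /* Added example: with pending regions [0, 64k) and [128k, 192k), discarding
     * [64k, 128k) first widens one region and the loop above then absorbs the
     * other, leaving a single [0, 192k) entry in s->discards. */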
/* XXX: cache several refcount block clusters ? */
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
    int64_t offset, int64_t length, int addend, enum qcow2_discard_type type)
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    uint16_t *refcount_block = NULL;
    int64_t old_table_index = -1;

    fprintf(stderr, "update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n",
            offset, length, addend);
    } else if (length == 0) {
        qcow2_cache_set_dependency(bs, s->refcount_block_cache,

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + length - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size)
        int block_index, refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t table_index = cluster_index >> s->refcount_block_bits;

        /* Load the refcount block and allocate it if needed */
        if (table_index != old_table_index) {
            if (refcount_block) {
                ret = qcow2_cache_put(bs, s->refcount_block_cache,
                                      (void**) &refcount_block);
            ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
        old_table_index = table_index;

        qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block);

        /* we can update the count and save it */
        block_index = cluster_index & (s->refcount_block_size - 1);
        refcount = be16_to_cpu(refcount_block[block_index]);
        if (refcount < 0 || refcount > s->refcount_max) {
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        refcount_block[block_index] = cpu_to_be16(refcount);

        if (refcount == 0 && s->discard_passthrough[type]) {
            update_refcount_discard(bs, cluster_offset, s->cluster_size);

    if (!s->cache_discards) {
        qcow2_process_discards(bs, ret);

    /* Write last changed block to disk */
    if (refcount_block) {
        wret = qcow2_cache_put(bs, s->refcount_block_cache,
                               (void**) &refcount_block);
        return ret < 0 ? ret : wret;
     * Try to undo any updates if an error is returned (this may succeed in
     * some cases, like ENOSPC for allocating a new refcount block)
        dummy = update_refcount(bs, offset, cluster_offset - offset, -addend,
                                QCOW2_DISCARD_NEVER);
 * Increases or decreases the refcount of a given cluster.
 * On success 0 is returned; on failure -errno is returned.
int qcow2_update_cluster_refcount(BlockDriverState *bs,
                                  int64_t cluster_index,
                                  enum qcow2_discard_type type)
    BDRVQcowState *s = bs->opaque;
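    /* Added note: a length of 1 byte is enough in the call below because
     * update_refcount() rounds the range out to whole clusters via
     * start_of_cluster(). */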
    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend,
/*********************************************************/
/* cluster allocation functions */

/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
    BDRVQcowState *s = bs->opaque;
    uint64_t i, nb_clusters;

    nb_clusters = size_to_clusters(s, size);
    for(i = 0; i < nb_clusters; i++) {
        uint64_t next_cluster_index = s->free_cluster_index++;
        ret = qcow2_get_refcount(bs, next_cluster_index, &refcount);
        } else if (refcount != 0) {

    /* Make sure that all offsets in the "allocated" range are representable
    if (s->free_cluster_index > 0 &&
        s->free_cluster_index - 1 > (INT64_MAX >> s->cluster_bits))

    fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
        offset = alloc_clusters_noref(bs, size);
        ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);
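    /* Added note: the -EAGAIN retry exists because alloc_refcount_block() may
     * itself allocate clusters for new refcount metadata, possibly taking the
     * very clusters picked above; the loop then searches again. */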
int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_index;

    assert(nb_clusters >= 0);
    if (nb_clusters == 0) {

        /* Check how many free clusters there are */
        cluster_index = offset >> s->cluster_bits;
        for(i = 0; i < nb_clusters; i++) {
            ret = qcow2_get_refcount(bs, cluster_index++, &refcount);
            } else if (refcount != 0) {

        /* And then allocate them */
        ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
                              QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);
/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
    BDRVQcowState *s = bs->opaque;
    size_t free_in_cluster;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
    assert(size > 0 && size <= s->cluster_size);
    assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset));

    offset = s->free_byte_offset;
        ret = qcow2_get_refcount(bs, offset >> s->cluster_bits, &refcount);
        if (refcount == s->refcount_max) {

    free_in_cluster = s->cluster_size - offset_into_cluster(s, offset);
    if (!offset || free_in_cluster < size) {
        int64_t new_cluster = alloc_clusters_noref(bs, s->cluster_size);
        if (new_cluster < 0) {
        if (!offset || ROUND_UP(offset, s->cluster_size) != new_cluster) {
            offset = new_cluster;

    ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);

    /* The cluster refcount was incremented; refcount blocks must be flushed
     * before the caller's L2 table updates. */
    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);

    s->free_byte_offset = offset + size;
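    /* Added note: free_byte_offset points into a partially used cluster; once
     * it reaches a cluster boundary it is reset below so the next request
     * allocates a fresh cluster. */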
    if (!offset_into_cluster(s, s->free_byte_offset)) {
        s->free_byte_offset = 0;
void qcow2_free_clusters(BlockDriverState *bs,
                         int64_t offset, int64_t size,
                         enum qcow2_discard_type type)
    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE);
    ret = update_refcount(bs, offset, size, -1, type);
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        /* TODO Remember the clusters to free them later and avoid leaking */
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
                             int nb_clusters, enum qcow2_discard_type type)
    BDRVQcowState *s = bs->opaque;

    switch (qcow2_get_cluster_type(l2_entry)) {
    case QCOW2_CLUSTER_COMPRESSED:
        nb_csectors = ((l2_entry >> s->csize_shift) &
        qcow2_free_clusters(bs,
                            (l2_entry & s->cluster_offset_mask) & ~511,
                            nb_csectors * 512, type);
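        /* Added note: for compressed clusters the L2 entry packs both the host
         * offset (cluster_offset_mask) and the size in 512-byte sectors
         * (csize_shift); the freed range starts at the host offset rounded
         * down to a sector boundary (& ~511) and spans nb_csectors * 512 bytes. */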
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO:
        if (l2_entry & L2E_OFFSET_MASK) {
            if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
                qcow2_signal_corruption(bs, false, -1, -1,
                                        "Cannot free unaligned cluster %#llx",
                                        l2_entry & L2E_OFFSET_MASK);
            qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
                                nb_clusters << s->cluster_bits, type);
    case QCOW2_CLUSTER_UNALLOCATED:
/*********************************************************/
/* snapshots and image creation */

/* update the refcounts of snapshots and the copied flag */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2;
    bool l1_allocated = false;
    int64_t old_offset, old_l2_offset;
    int i, j, l1_modified = 0, nb_csectors;

    l1_size2 = l1_size * sizeof(uint64_t);
    s->cache_discards = true;

    /* WARNING: qcow2_snapshot_goto relies on this function not using the
     * l1_table_offset when it is the current s->l1_table_offset! Be careful
     * when changing this! */
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = g_try_malloc0(align_offset(l1_size2, 512));
        if (l1_size2 && l1_table == NULL) {
        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = false;
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
            old_l2_offset = l2_offset;
            l2_offset &= L1E_OFFSET_MASK;

            if (offset_into_cluster(s, l2_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                        PRIx64 " unaligned (L1 index: %#x)",

            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,

            for(j = 0; j < s->l2_size; j++) {
                uint64_t cluster_index;

                offset = be64_to_cpu(l2_table[j]);
                offset &= ~QCOW_OFLAG_COPIED;

                switch (qcow2_get_cluster_type(offset)) {
                case QCOW2_CLUSTER_COMPRESSED:
                    nb_csectors = ((offset >> s->csize_shift) &
                        ret = update_refcount(bs,
                            (offset & s->cluster_offset_mask) & ~511,
                            nb_csectors * 512, addend,
                            QCOW2_DISCARD_SNAPSHOT);
                    /* compressed clusters are never modified */

                case QCOW2_CLUSTER_NORMAL:
                case QCOW2_CLUSTER_ZERO:
                    if (offset_into_cluster(s, offset & L2E_OFFSET_MASK)) {
                        qcow2_signal_corruption(bs, true, -1, -1, "Data "
                                                "cluster offset %#llx "
                                                "unaligned (L2 offset: %#"
                                                PRIx64 ", L2 index: %#x)",
                                                offset & L2E_OFFSET_MASK,

                    cluster_index = (offset & L2E_OFFSET_MASK) >> s->cluster_bits;
                    if (!cluster_index) {
                        ret = qcow2_update_cluster_refcount(bs,
                            cluster_index, addend,
                            QCOW2_DISCARD_SNAPSHOT);
                        ret = qcow2_get_refcount(bs, cluster_index, &refcount);

                case QCOW2_CLUSTER_UNALLOCATED:
                if (refcount == 1) {
                    offset |= QCOW_OFLAG_COPIED;
                if (offset != old_offset) {
                    qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                               s->refcount_block_cache);
                    l2_table[j] = cpu_to_be64(offset);
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

            ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

                ret = qcow2_update_cluster_refcount(bs, l2_offset >>
                                                    QCOW2_DISCARD_SNAPSHOT);
                ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
            } else if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;

    ret = bdrv_flush(bs);
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    /* Update L1 only if it isn't deleted anyway (addend = -1) */
    if (ret == 0 && addend >= 0 && l1_modified) {
        for (i = 0; i < l1_size; i++) {
            cpu_to_be64s(&l1_table[i]);

        ret = bdrv_pwrite_sync(bs->file, l1_table_offset, l1_table, l1_size2);
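        /* Added note: the entries were byte-swapped in place for the write
         * above and are swapped back below, because l1_table may be the active
         * s->l1_table rather than a private copy. */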
        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
/*********************************************************/
/* refcount checking functions */
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared to the refcount table saved in the image.
 * Modifies the number of errors in res.
static int inc_refcounts(BlockDriverState *bs,
                         BdrvCheckResult *res,
                         uint16_t **refcount_table,
                         int64_t *refcount_table_size,
                         int64_t offset, int64_t size)
    BDRVQcowState *s = bs->opaque;
    uint64_t start, last, cluster_offset, k;

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k >= *refcount_table_size) {
            int64_t old_refcount_table_size = *refcount_table_size;
            uint16_t *new_refcount_table;

            *refcount_table_size = k + 1;
            new_refcount_table = g_try_realloc(*refcount_table,
                                               *refcount_table_size *
                                               sizeof(**refcount_table));
            if (!new_refcount_table) {
                *refcount_table_size = old_refcount_table_size;
                res->check_errors++;
            *refcount_table = new_refcount_table;

            memset(*refcount_table + old_refcount_table_size, 0,
                   (*refcount_table_size - old_refcount_table_size) *
                   sizeof(**refcount_table));

        if (++(*refcount_table)[k] == 0) {
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
/* Flags for check_refcounts_l1() and check_refcounts_l2() */
    CHECK_FRAG_INFO = 0x2, /* update BlockFragInfo counters */
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
    uint16_t **refcount_table, int64_t *refcount_table_size, int64_t l2_offset,
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, l2_entry;
    uint64_t next_contiguous_offset = 0;
    int i, l2_size, nb_csectors, ret;
    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = g_malloc(l2_size);

    ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
        fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
        res->check_errors++;

    /* Do the actual checks */
    for(i = 0; i < s->l2_size; i++) {
        l2_entry = be64_to_cpu(l2_table[i]);

        switch (qcow2_get_cluster_type(l2_entry)) {
        case QCOW2_CLUSTER_COMPRESSED:
            /* Compressed clusters don't have QCOW_OFLAG_COPIED */
            if (l2_entry & QCOW_OFLAG_COPIED) {
                fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                        "copied flag must never be set for compressed "
                        "clusters\n", l2_entry >> s->cluster_bits);
                l2_entry &= ~QCOW_OFLAG_COPIED;

            /* Mark cluster as used */
            nb_csectors = ((l2_entry >> s->csize_shift) &
            l2_entry &= s->cluster_offset_mask;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_entry & ~511, nb_csectors * 512);

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                res->bfi.compressed_clusters++;

                /* Compressed clusters are fragmented by nature. Since they
                 * take up sub-sector space but we only have sector granularity
                 * I/O we need to re-read the same sectors even for adjacent
                 * compressed clusters.
                res->bfi.fragmented_clusters++;

        case QCOW2_CLUSTER_ZERO:
            if ((l2_entry & L2E_OFFSET_MASK) == 0) {

        case QCOW2_CLUSTER_NORMAL:
            uint64_t offset = l2_entry & L2E_OFFSET_MASK;

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                if (next_contiguous_offset &&
                    offset != next_contiguous_offset) {
                    res->bfi.fragmented_clusters++;
                next_contiguous_offset = offset + s->cluster_size;

            /* Mark cluster as used */
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                offset, s->cluster_size);

            /* Correct offsets are cluster aligned */
            if (offset_into_cluster(s, offset)) {
                fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                        "properly aligned; L2 entry corrupted.\n", offset);

        case QCOW2_CLUSTER_UNALLOCATED:
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
static int check_refcounts_l1(BlockDriverState *bs,
                              BdrvCheckResult *res,
                              uint16_t **refcount_table,
                              int64_t *refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL, l2_offset, l1_size2;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                        l1_table_offset, l1_size2);

    /* Read L1 table entries from disk */
        l1_table = g_try_malloc(l1_size2);
        if (l1_table == NULL) {
            res->check_errors++;
        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
            fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
            res->check_errors++;
        for(i = 0;i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);

    /* Do the actual checks */
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];

            /* Mark L2 table as used */
            l2_offset &= L1E_OFFSET_MASK;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_offset, s->cluster_size);

            /* L2 tables are cluster aligned */
            if (offset_into_cluster(s, l2_offset)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                        "cluster aligned; L1 entry corrupted\n", l2_offset);

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, res, refcount_table,
                                     refcount_table_size, l2_offset, flags);
 * Checks the OFLAG_COPIED flag for all L1 and L2 entries.
 * This function does not print an error message nor does it increment
 * check_errors if qcow2_get_refcount fails (this is because such an error will
 * already have been detected and sufficiently signaled by the calling function
 * (qcow2_check_refcounts) by the time this function is called).
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
    for (i = 0; i < s->l1_size; i++) {
        uint64_t l1_entry = s->l1_table[i];
        uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
            /* don't print message nor increment check_errors */
        if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
            fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
                    "l1_entry=%" PRIx64 " refcount=%d\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" :
                    i, l1_entry, refcount);
            if (fix & BDRV_FIX_ERRORS) {
                s->l1_table[i] = refcount == 1
                    ? l1_entry | QCOW_OFLAG_COPIED
                    : l1_entry & ~QCOW_OFLAG_COPIED;
                ret = qcow2_write_l1_entry(bs, i);
                    res->check_errors++;
                res->corruptions_fixed++;

            ret = bdrv_pread(bs->file, l2_offset, l2_table,
                             s->l2_size * sizeof(uint64_t));
                fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                res->check_errors++;

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);

            if ((cluster_type == QCOW2_CLUSTER_NORMAL) ||
                ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) {
                ret = qcow2_get_refcount(bs,
                                         data_offset >> s->cluster_bits,
                    /* don't print message nor increment check_errors */
                if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "%s OFLAG_COPIED data cluster: "
                            "l2_entry=%" PRIx64 " refcount=%d\n",
                            fix & BDRV_FIX_ERRORS ? "Repairing" :
                            l2_entry, refcount);
                    if (fix & BDRV_FIX_ERRORS) {
                        l2_table[j] = cpu_to_be64(refcount == 1
                            ? l2_entry | QCOW_OFLAG_COPIED
                            : l2_entry & ~QCOW_OFLAG_COPIED);
                        res->corruptions_fixed++;

            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                                                l2_offset, s->cluster_size);
                fprintf(stderr, "ERROR: Could not write L2 table; metadata "
                        "overlap check failed: %s\n", strerror(-ret));
                res->check_errors++;

            ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size);
                fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
                res->check_errors++;

    qemu_vfree(l2_table);
 * Checks consistency of refblocks and accounts for each refblock in
 * *refcount_table.
static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
                           BdrvCheckMode fix, bool *rebuild,
                           uint16_t **refcount_table, int64_t *nb_clusters)
    BDRVQcowState *s = bs->opaque;

    for(i = 0; i < s->refcount_table_size; i++) {
        uint64_t offset, cluster;
        offset = s->refcount_table[i];
        cluster = offset >> s->cluster_bits;

        /* Refcount blocks are cluster aligned */
        if (offset_into_cluster(s, offset)) {
            fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
                    "cluster aligned; refcount table entry corrupted\n", i);

        if (cluster >= *nb_clusters) {
            fprintf(stderr, "%s refcount block %" PRId64 " is outside image\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i);

            if (fix & BDRV_FIX_ERRORS) {
                int64_t old_nb_clusters = *nb_clusters;
                uint16_t *new_refcount_table;

                if (offset > INT64_MAX - s->cluster_size) {
                ret = bdrv_truncate(bs->file, offset + s->cluster_size);
                size = bdrv_getlength(bs->file);
                *nb_clusters = size_to_clusters(s, size);
                assert(*nb_clusters >= old_nb_clusters);

                new_refcount_table = g_try_realloc(*refcount_table,
                                                   sizeof(**refcount_table));
                if (!new_refcount_table) {
                    *nb_clusters = old_nb_clusters;
                    res->check_errors++;
                *refcount_table = new_refcount_table;

                memset(*refcount_table + old_nb_clusters, 0,
                       (*nb_clusters - old_nb_clusters) *
                       sizeof(**refcount_table));

                if (cluster >= *nb_clusters) {
                res->corruptions_fixed++;
                ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                                    offset, s->cluster_size);
                /* No need to check whether the refcount is now greater than 1:
                 * This area was just allocated and zeroed, so it can only be
                 * exactly 1 after inc_refcounts() */
                fprintf(stderr, "ERROR could not resize image: %s\n",

        ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                            offset, s->cluster_size);
        if ((*refcount_table)[cluster] != 1) {
            fprintf(stderr, "ERROR refcount block %" PRId64
                    " refcount=%d\n", i, (*refcount_table)[cluster]);
 * Calculates an in-memory refcount table.
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                               BdrvCheckMode fix, bool *rebuild,
                               uint16_t **refcount_table, int64_t *nb_clusters)
    BDRVQcowState *s = bs->opaque;

    if (!*refcount_table) {
        *refcount_table = g_try_new0(uint16_t, *nb_clusters);
        if (*nb_clusters && *refcount_table == NULL) {
            res->check_errors++;

    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        0, s->cluster_size);
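    /* Added note: cluster 0 holds the qcow2 header, which is why the first
     * cluster of the image is always accounted as used here. */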
    /* current L1 table */
    ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                             s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO);

    for (i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                                 sn->l1_table_offset, sn->l1_size, 0);
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->snapshots_offset, s->snapshots_size);

    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->refcount_table_offset,
                        s->refcount_table_size * sizeof(uint64_t));

    return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters);
 * Compares the actual reference count for each cluster in the image against the
 * refcount as reported by the refcount structures on-disk.
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix, bool *rebuild,
                              int64_t *highest_cluster,
                              uint16_t *refcount_table, int64_t nb_clusters)
    BDRVQcowState *s = bs->opaque;
    uint16_t refcount1, refcount2;

    for (i = 0, *highest_cluster = 0; i < nb_clusters; i++) {
        ret = qcow2_get_refcount(bs, i, &refcount1);
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
            res->check_errors++;

        refcount2 = refcount_table[i];

        if (refcount1 > 0 || refcount2 > 0) {
            *highest_cluster = i;

        if (refcount1 != refcount2) {
            /* Check if we're allowed to fix the mismatch */
            int *num_fixed = NULL;
            if (refcount1 == 0) {
            } else if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
                num_fixed = &res->leaks_fixed;
            } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
                num_fixed = &res->corruptions_fixed;
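            /* Added note: refcount1 is the on-disk refcount and refcount2 the
             * recomputed reference count; too high on disk means a leaked
             * cluster (fixable with BDRV_FIX_LEAKS), too low means a
             * corruption (fixable with BDRV_FIX_ERRORS). */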
            fprintf(stderr, "%s cluster %" PRId64 " refcount=%d reference=%d\n",
                    num_fixed != NULL ? "Repairing" :
                    refcount1 < refcount2 ? "ERROR" :
                    i, refcount1, refcount2);

                ret = update_refcount(bs, i << s->cluster_bits, 1,
                                      (int)refcount2 - (int)refcount1,
                                      QCOW2_DISCARD_ALWAYS);

            /* And if we couldn't, print an error */
            if (refcount1 < refcount2) {
 * Allocates clusters using an in-memory refcount table (IMRT) in contrast to
 * the on-disk refcount structures.
 * On input, *first_free_cluster tells where to start looking, and need not
 * actually be a free cluster; the returned offset will not be before that
 * cluster. On output, *first_free_cluster points to the first gap found, even
 * if that gap was too small to be used as the returned offset.
 * Note that *first_free_cluster is a cluster index whereas the return value is
 * an offset.
static int64_t alloc_clusters_imrt(BlockDriverState *bs,
                                   uint16_t **refcount_table,
                                   int64_t *imrt_nb_clusters,
                                   int64_t *first_free_cluster)
    BDRVQcowState *s = bs->opaque;
    int64_t cluster = *first_free_cluster, i;
    bool first_gap = true;
    int contiguous_free_clusters;

    /* Starting at *first_free_cluster, find a range of at least cluster_count
     * continuously free clusters */
    for (contiguous_free_clusters = 0;
         cluster < *imrt_nb_clusters &&
         contiguous_free_clusters < cluster_count;
        if (!(*refcount_table)[cluster]) {
            contiguous_free_clusters++;
                /* If this is the first free cluster found, update
                 * *first_free_cluster accordingly */
                *first_free_cluster = cluster;
        } else if (contiguous_free_clusters) {
            contiguous_free_clusters = 0;

    /* If contiguous_free_clusters is greater than zero, it contains the number
     * of continuously free clusters until the current cluster; the first free
     * cluster in the current "gap" is therefore
     * cluster - contiguous_free_clusters */

    /* If no such range could be found, grow the in-memory refcount table
     * accordingly to append free clusters at the end of the image */
    if (contiguous_free_clusters < cluster_count) {
        int64_t old_imrt_nb_clusters = *imrt_nb_clusters;
        uint16_t *new_refcount_table;
        /* contiguous_free_clusters clusters are already empty at the image end;
         * we need cluster_count clusters; therefore, we have to allocate
         * cluster_count - contiguous_free_clusters new clusters at the end of
         * the image (which is the current value of cluster; note that cluster
         * may exceed old_imrt_nb_clusters if *first_free_cluster pointed beyond
         * the image end) */
        *imrt_nb_clusters = cluster + cluster_count - contiguous_free_clusters;
        new_refcount_table = g_try_realloc(*refcount_table,
                                           sizeof(**refcount_table));
        if (!new_refcount_table) {
            *imrt_nb_clusters = old_imrt_nb_clusters;
        *refcount_table = new_refcount_table;

        memset(*refcount_table + old_imrt_nb_clusters, 0,
               (*imrt_nb_clusters - old_imrt_nb_clusters) *
               sizeof(**refcount_table));

    /* Go back to the first free cluster */
    cluster -= contiguous_free_clusters;
    for (i = 0; i < cluster_count; i++) {
        (*refcount_table)[cluster + i] = 1;

    return cluster << s->cluster_bits;
 * Creates a new refcount structure based solely on the in-memory information
 * given through *refcount_table. All necessary allocations will be reflected
 * in *refcount_table.
 * On success, the old refcount structure is leaked (it will be covered by the
 * new refcount structure).
static int rebuild_refcount_structure(BlockDriverState *bs,
                                      BdrvCheckResult *res,
                                      uint16_t **refcount_table,
                                      int64_t *nb_clusters)
    BDRVQcowState *s = bs->opaque;
    int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0;
    int64_t refblock_offset, refblock_start, refblock_index;
    uint32_t reftable_size = 0;
    uint64_t *on_disk_reftable = NULL;
    uint16_t *on_disk_refblock;
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED reftable_offset_and_clusters;

    qcow2_cache_empty(bs, s->refcount_block_cache);

    for (; cluster < *nb_clusters; cluster++) {
        if (!(*refcount_table)[cluster]) {

        refblock_index = cluster >> s->refcount_block_bits;
        refblock_start = refblock_index << s->refcount_block_bits;

        /* Don't allocate a cluster in a refblock already written to disk */
        if (first_free_cluster < refblock_start) {
            first_free_cluster = refblock_start;
        refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
                                              nb_clusters, &first_free_cluster);
        if (refblock_offset < 0) {
            fprintf(stderr, "ERROR allocating refblock: %s\n",
                    strerror(-refblock_offset));
            res->check_errors++;
            ret = refblock_offset;

        if (reftable_size <= refblock_index) {
            uint32_t old_reftable_size = reftable_size;
            uint64_t *new_on_disk_reftable;
            reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t),
                                     s->cluster_size) / sizeof(uint64_t);
            new_on_disk_reftable = g_try_realloc(on_disk_reftable,
            if (!new_on_disk_reftable) {
                res->check_errors++;
            on_disk_reftable = new_on_disk_reftable;

            memset(on_disk_reftable + old_reftable_size, 0,
                   (reftable_size - old_reftable_size) * sizeof(uint64_t));

            /* The offset we have for the reftable is now no longer valid;
             * this will leak that range, but we can easily fix that by running
             * a leak-fixing check after this rebuild operation */
            reftable_offset = -1;

        on_disk_reftable[refblock_index] = refblock_offset;
        /* If this is apparently the last refblock (for now), try to squeeze the
         * reftable in */
        if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits &&
            reftable_offset < 0)
            uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
            reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                                  refcount_table, nb_clusters,
                                                  &first_free_cluster);
            if (reftable_offset < 0) {
                fprintf(stderr, "ERROR allocating reftable: %s\n",
                        strerror(-reftable_offset));
                res->check_errors++;
                ret = reftable_offset;

        ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));

        on_disk_refblock = qemu_blockalign0(bs->file, s->cluster_size);
        for (i = 0; i < s->refcount_block_size &&
             refblock_start + i < *nb_clusters; i++)
            on_disk_refblock[i] =
                cpu_to_be16((*refcount_table)[refblock_start + i]);

        ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE,
                         (void *)on_disk_refblock, s->cluster_sectors);
        qemu_vfree(on_disk_refblock);
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));

        /* Go to the end of this refblock */
        cluster = refblock_start + s->refcount_block_size - 1;

    if (reftable_offset < 0) {
        uint64_t post_refblock_start, reftable_clusters;

        post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
        reftable_clusters = size_to_clusters(s,
                                             reftable_size * sizeof(uint64_t));
        /* Not pretty but simple */
        if (first_free_cluster < post_refblock_start) {
            first_free_cluster = post_refblock_start;
        reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                              refcount_table, nb_clusters,
                                              &first_free_cluster);
        if (reftable_offset < 0) {
            fprintf(stderr, "ERROR allocating reftable: %s\n",
                    strerror(-reftable_offset));
            res->check_errors++;
            ret = reftable_offset;

        goto write_refblocks;
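        /* Added note: if no spot for the reftable was found while writing the
         * refblocks above, it is placed past the last refblock here, and the
         * jump back to write_refblocks makes sure the refblocks covering the
         * newly allocated clusters get written as well. */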
    assert(on_disk_reftable);

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        cpu_to_be64s(&on_disk_reftable[refblock_index]);

    ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
                                        reftable_size * sizeof(uint64_t));
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));

    assert(reftable_size < INT_MAX / sizeof(uint64_t));
    ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
                      reftable_size * sizeof(uint64_t));
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));

    /* Enter new reftable into the image header */
    cpu_to_be64w(&reftable_offset_and_clusters.reftable_offset,
    cpu_to_be32w(&reftable_offset_and_clusters.reftable_clusters,
                 size_to_clusters(s, reftable_size * sizeof(uint64_t)));
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader,
                                              refcount_table_offset),
                           &reftable_offset_and_clusters,
                           sizeof(reftable_offset_and_clusters));
        fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret));

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        be64_to_cpus(&on_disk_reftable[refblock_index]);
    s->refcount_table = on_disk_reftable;
    s->refcount_table_offset = reftable_offset;
    s->refcount_table_size = reftable_size;

    g_free(on_disk_reftable);
 * Checks an image for refcount consistency.
 * Returns 0 if no errors are found, the number of errors in case the image is
 * detected as corrupted, and -errno when an internal error occurred.
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
    BDRVQcowState *s = bs->opaque;
    BdrvCheckResult pre_compare_res;
    int64_t size, highest_cluster, nb_clusters;
    uint16_t *refcount_table = NULL;
    bool rebuild = false;

    size = bdrv_getlength(bs->file);
        res->check_errors++;

    nb_clusters = size_to_clusters(s, size);
    if (nb_clusters > INT_MAX) {
        res->check_errors++;

    res->bfi.total_clusters =
        size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE);

    ret = calculate_refcounts(bs, res, fix, &rebuild, &refcount_table,

    /* In case we don't need to rebuild the refcount structure (but want to fix
     * something), this function is immediately called again, in which case the
     * result should be ignored */
    pre_compare_res = *res;
    compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table,

    if (rebuild && (fix & BDRV_FIX_ERRORS)) {
        BdrvCheckResult old_res = *res;
        int fresh_leaks = 0;

        fprintf(stderr, "Rebuilding refcount structure\n");
        ret = rebuild_refcount_structure(bs, res, &refcount_table,

        res->corruptions = 0;

        /* Because the old reftable has been exchanged for a new one the
         * references have to be recalculated */
        memset(refcount_table, 0, nb_clusters * sizeof(uint16_t));
        ret = calculate_refcounts(bs, res, 0, &rebuild, &refcount_table,

        if (fix & BDRV_FIX_LEAKS) {
            /* The old refcount structures are now leaked, fix it; the result
             * can be ignored, aside from leaks which were introduced by
             * rebuild_refcount_structure() that could not be fixed */
            BdrvCheckResult saved_res = *res;
            *res = (BdrvCheckResult){ 0 };

            compare_refcounts(bs, res, BDRV_FIX_LEAKS, &rebuild,
                              &highest_cluster, refcount_table, nb_clusters);
                fprintf(stderr, "ERROR rebuilt refcount structure is still "

            /* Any leaks accounted for here were introduced by
             * rebuild_refcount_structure() because that function has created a
             * new refcount structure from scratch */
            fresh_leaks = res->leaks;

        if (res->corruptions < old_res.corruptions) {
            res->corruptions_fixed += old_res.corruptions - res->corruptions;
        if (res->leaks < old_res.leaks) {
            res->leaks_fixed += old_res.leaks - res->leaks;
        res->leaks += fresh_leaks;

        fprintf(stderr, "ERROR need to rebuild refcount structures\n");
        res->check_errors++;

    if (res->leaks || res->corruptions) {
        *res = pre_compare_res;
        compare_refcounts(bs, res, fix, &rebuild, &highest_cluster,
                          refcount_table, nb_clusters);

    /* check OFLAG_COPIED */
    ret = check_oflag_copied(bs, res, fix);

    res->image_end_offset = (highest_cluster + 1) * s->cluster_size;

    g_free(refcount_table);
#define overlaps_with(ofs, sz) \
    ranges_overlap(offset, size, ofs, sz)
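/* Added note: the macro deliberately refers to the offset and size parameters
 * of the function it is used in (qcow2_check_metadata_overlap below). */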
 * Checks if the given offset into the image file is actually free to use by
 * looking for overlaps with important metadata sections (L1/L2 tables etc.),
 * i.e. a sanity check without relying on the refcount tables.
 * The ign parameter specifies what checks not to perform (being a bitmask of
 * QCow2MetadataOverlap values), i.e., what sections to ignore.
 * - 0 if writing to this offset will not affect the mentioned metadata
 * - a positive QCow2MetadataOverlap value indicating one overlapping section
 * - a negative value (-errno) indicating an error while performing a check,
 *   e.g. when bdrv_read failed on QCOW2_OL_INACTIVE_L2
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
    BDRVQcowState *s = bs->opaque;
    int chk = s->overlap_check & ~ign;

    if (chk & QCOW2_OL_MAIN_HEADER) {
        if (offset < s->cluster_size) {
            return QCOW2_OL_MAIN_HEADER;

    /* align range to test to cluster boundaries */
    size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size);
    offset = start_of_cluster(s, offset);

    if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
        if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
            return QCOW2_OL_ACTIVE_L1;

    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
                          s->refcount_table_size * sizeof(uint64_t))) {
            return QCOW2_OL_REFCOUNT_TABLE;

    if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) {
        if (overlaps_with(s->snapshots_offset, s->snapshots_size)) {
            return QCOW2_OL_SNAPSHOT_TABLE;
2161 if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) {
2162 for (i = 0; i < s->nb_snapshots; i++) {
2163 if (s->snapshots[i].l1_size &&
2164 overlaps_with(s->snapshots[i].l1_table_offset,
2165 s->snapshots[i].l1_size * sizeof(uint64_t))) {
2166 return QCOW2_OL_INACTIVE_L1;
2171 if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) {
2172 for (i = 0; i < s->l1_size; i++) {
2173 if ((s->l1_table[i] & L1E_OFFSET_MASK) &&
2174 overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK,
2176 return QCOW2_OL_ACTIVE_L2;
2181 if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
2182 for (i = 0; i < s->refcount_table_size; i++) {
2183 if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
2184 overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
2186 return QCOW2_OL_REFCOUNT_BLOCK;
2191 if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) {
2192 for (i = 0; i < s->nb_snapshots; i++) {
2193 uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
2194 uint32_t l1_sz = s->snapshots[i].l1_size;
2195 uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
2196 uint64_t *l1 = g_try_malloc(l1_sz2);
2199 if (l1_sz2 && l1 == NULL) {
2203 ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2);
2209 for (j = 0; j < l1_sz; j++) {
2210 uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK;
2211 if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) {
2213 return QCOW2_OL_INACTIVE_L2;
static const char *metadata_ol_names[] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]    = "qcow2_header",
    [QCOW2_OL_ACTIVE_L1_BITNR]      = "active L1 table",
    [QCOW2_OL_ACTIVE_L2_BITNR]      = "active L2 table",
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table",
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block",
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table",
    [QCOW2_OL_INACTIVE_L1_BITNR]    = "inactive L1 table",
    [QCOW2_OL_INACTIVE_L2_BITNR]    = "inactive L2 table",
 * First performs a check for metadata overlaps (through
 * qcow2_check_metadata_overlap); if that fails with a negative value (error
 * while performing a check), that value is returned. If an impending overlap
 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt
 * and -EIO returned.
 * Returns 0 if there were neither overlaps nor errors while checking for
 * overlaps; or a negative value (-errno) on error.
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
    int ret = qcow2_check_metadata_overlap(bs, ign, offset, size);
    } else if (ret > 0) {
        int metadata_ol_bitnr = ffs(ret) - 1;
        assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR);

        qcow2_signal_corruption(bs, true, offset, size, "Preventing invalid "
                                "write on metadata (overlaps with %s)",
                                metadata_ol_names[metadata_ol_bitnr]);