/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"
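/*
 * Orientation sketch (illustrative values, assuming the default 64 KiB
 * clusters): an L2 table is one cluster of 8-byte entries, i.e.
 * l2_size = 65536 / 8 = 8192 entries, and each entry maps one 64 KiB
 * cluster, so a single L2 table covers 8192 * 64 KiB = 512 MiB of guest
 * address space. A guest offset is split roughly as:
 *
 *     l1_index          = offset >> (s->l2_bits + s->cluster_bits);
 *     l2_index          = (offset >> s->cluster_bits) & (s->l2_size - 1);
 *     offset_in_cluster = offset & (s->cluster_size - 1);
 */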
int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}
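/*
 * Ordering note: the truncated on-disk L1 entries are zeroed and flushed
 * before the corresponding L2 clusters are freed, so a crash in between
 * leaves at worst unreferenced (leaked) clusters, never L1 entries that
 * point at freed data.
 */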
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       ROUND_UP(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, ROUND_UP(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2, false);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;

fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
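/*
 * Growth sketch: when exact_size is false the table grows geometrically,
 * new = DIV_ROUND_UP(new * 3, 2), i.e. roughly 1.5x per step. For example,
 * growing from l1_size == 64 until min_size == 100 is reached takes two
 * steps, 64 -> 96 -> 144, so repeated small growth requests only rewrite
 * the (expensive) on-disk table O(log n) times.
 */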
/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads a L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = sizeof(uint64_t) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}
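/*
 * Worked example (assuming 8192-entry L2 tables split into 1024-entry
 * slices): a guest offset with offset_to_l2_index() == 5000 falls into
 * slice 4, offset_to_l2_slice_index() == 5000 % 1024 == 904, so
 * start_of_slice == 8 * (5000 - 904) == 8 * 4096, the byte offset of
 * that slice within the on-disk L2 table.
 */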
/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index, i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf), false);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    return ret < 0 ? ret : 0;
}
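/*
 * Masking example: for l1_index == 77, l1_start_index == (77 & ~63) == 64,
 * so entries 64..127 (one full 512-byte sector) are serialized and written
 * in a single aligned request.
 */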
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */
    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */
    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);
        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
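/*
 * Crash-safety note: the new L2 table is fully populated and flushed before
 * the L1 entry is rewritten, and on failure the in-memory L1 entry is rolled
 * back to old_l2_offset, so a crash at any point leaves the image referencing
 * either the complete old table or the complete new one.
 */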
/*
 * Checks how many clusters in a given L2 slice are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(BlockDriverState *bs, int nb_clusters,
        int cluster_size, uint64_t *l2_slice, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_slice[0]);
    uint64_t offset = first_entry & mask;

    first_cluster_type = qcow2_get_cluster_type(bs, first_entry);
    if (first_cluster_type == QCOW2_CLUSTER_UNALLOCATED) {
        return 0;
    }

    /* must be allocated */
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
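/*
 * Example (64 KiB clusters): if the first entry points at host offset
 * 0x50000, the scan continues while entry i, masked down to its host
 * offset plus the stop_flags, equals 0x50000 + i * 0x10000; a compressed
 * cluster or a flag change terminates the run.
 */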
/*
 * Checks how many consecutive unallocated clusters in a given L2
 * slice have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(BlockDriverState *bs,
                                                 int nb_clusters,
                                                 uint64_t *l2_slice,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_slice[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(bs, entry);
        if (type != wanted_type) {
            break;
        }
    }

    return i;
}
static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv_part(bs,
                                       src_cluster_offset + offset_in_cluster,
                                       qiov->size, qiov, 0, 0);
    return ret < 0 ? ret : 0;
}
static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        assert(QEMU_IS_ALIGNED(offset_in_cluster, BDRV_SECTOR_SIZE));
        assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
        assert(s->crypto);
        if (qcow2_co_encrypt(bs, cluster_offset,
                             src_cluster_offset + offset_in_cluster,
                             buffer, bytes) < 0) {
            return false;
        }
    }
    return true;
}
static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size, true);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    return ret < 0 ? ret : 0;
}
/*
 * qcow2_get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_slice;
    int c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(bs, *cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        if (has_data_file(bs)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster "
                                    "entry found in image with external data "
                                    "file (L2 offset: %#" PRIx64 ", L2 index: "
                                    "%#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(bs, nb_clusters,
                                                  &l2_slice[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        if (has_data_file(bs) && *cluster_offset != offset - offset_in_cluster)
        {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "External data file host cluster offset %#"
                                    PRIx64 " does not match guest cluster "
                                    "offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    offset - offset_in_cluster, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}
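/*
 * Illustrative caller pattern (hypothetical, for documentation only):
 *
 *     unsigned int cur_bytes = bytes_remaining;
 *     uint64_t host_offset;
 *     int type = qcow2_get_cluster_offset(bs, offset, &cur_bytes,
 *                                         &host_offset);
 *     // On success, type is the QCOW2_CLUSTER_* value and cur_bytes now
 *     // holds how many contiguous bytes share that type from offset on,
 *     // so the caller advances offset by cur_bytes and loops.
 */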
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the appropriate slice of its l2 table.
 *
 * the cluster index in the l2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset on the virtual disk, allocate a new compressed cluster
 * and put the host offset of the cluster into *host_offset. If a cluster is
 * already allocated at the offset, return an error.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                          uint64_t offset,
                                          int compressed_size,
                                          uint64_t *host_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    if (has_data_file(bs)) {
        return -ENOTSUP;
    }

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_slice[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return -EIO;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return cluster_offset;
    }

    nb_csectors =
        (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
        (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    l2_slice[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    *host_offset = cluster_offset & s->cluster_offset_mask;
    return 0;
}
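/*
 * Entry layout sketch (assuming the default 64 KiB clusters, i.e.
 * cluster_bits == 16, so csize_shift == 62 - (cluster_bits - 8) == 54):
 * a compressed L2 entry packs QCOW_OFLAG_COMPRESSED, the number of
 * occupied 512-byte sectors in the bits starting at csize_shift, and the
 * byte offset of the compressed data in the low bits, which is why
 * *host_offset is recovered above by masking with s->cluster_offset_mask.
 */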
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);

    if ((start->nb_bytes == 0 && end->nb_bytes == 0) || m->skip_cow) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ?
                                qemu_iovec_subvec_niov(m->data_qiov,
                                                       m->data_qiov_offset,
                                                       data_bytes)
                                : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, m->data_qiov_offset, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}
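/*
 * COW buffer layout sketch: a partial write into a newly allocated cluster
 * range is assembled as
 *
 *   |<-- cow_start -->|<-- guest data -->|<-- cow_end -->|
 *
 * where cow_start/cow_end are read from the old location (or the backing
 * file) and, when the guest data qiov is available, the whole range is
 * submitted as one write.
 */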
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster
         * each write allocates separate cluster and writes data concurrently.
         * The first one to complete updates l2 table with pointer to its
         * cluster the second one has to do RMW (which is done above by
         * perform_cow()), update l2 table with its cluster pointer and free
         * old cluster. This is what this loop does */
        if (l2_slice[l2_index + i] != 0) {
            old_cluster[j++] = l2_slice[l2_index + i];
        }

        l2_slice[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}
/*
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    qcow2_free_clusters(bs, m->alloc_offset, m->nb_clusters << s->cluster_bits,
                        QCOW2_DISCARD_NEVER);
}
/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BlockDriverState *bs, int nb_clusters,
    uint64_t *l2_slice, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(bs, l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
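/*
 * Overlap example: if an in-flight allocation covers guest range
 * [0x40000, 0x60000) and a new request arrives for [0x30000, 0x50000),
 * then start < old_start, so the request is shortened to
 * bytes = 0x40000 - 0x30000 and the caller handles the overlapping tail
 * in a later iteration (possibly waiting on dependent_requests).
 */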
/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If *host_offset is
 * not INV_OFFSET, only physically contiguous clusters beginning at this host
 * offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to exact byte referenced by guest_offset and
 * therefore isn't cluster aligned as well.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset)
                                      == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(bs, cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != INV_OFFSET && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is not INV_OFFSET, it specifies the offset in the image file
 * at which the new clusters must start. *nb_clusters can be 0 on return in
 * this case if the cluster at host_offset is already in use. If *host_offset
 * is INV_OFFSET, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    if (has_data_file(bs)) {
        assert(*host_offset == INV_OFFSET ||
               *host_offset == start_of_cluster(s, guest_offset));
        *host_offset = start_of_cluster(s, guest_offset);
        return 0;
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == INV_OFFSET) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
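/*
 * External data file note: with a data file the guest-to-host mapping is
 * 1:1, so the "allocation" above simply returns
 * start_of_cluster(s, guest_offset) as the host offset; only the qcow2
 * metadata (L2 entries) needs to be updated by the caller.
 */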
/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is not INV_OFFSET, clusters are only
 * allocated if the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to exact byte referenced by guest_offset and
 * therefore isn't cluster aligned as well.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_slice;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = INV_OFFSET;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_slice[l2_index]);
    nb_clusters = count_cow_clusters(bs, nb_clusters, l2_slice, l2_index);

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(bs, entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (*host_offset == INV_OFFSET ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        int preallocated_nb_clusters;

        if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
                                    "cluster offset %#llx unaligned (guest "
                                    "offset: %#" PRIx64 ")",
                                    entry & L2E_OFFSET_MASK, guest_offset);
            ret = -EIO;
            goto fail;
        }

        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        preallocated_nb_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    if (alloc_cluster_offset == INV_OFFSET) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET :
                               start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        assert(alloc_cluster_offset != INV_OFFSET);
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
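/*
 * Worked example (64 KiB clusters): a write of 0x3000 bytes at guest
 * offset 0x21200 has offset_into_cluster == 0x1200, so requested_bytes ==
 * 0x4200. With one newly allocated cluster, avail_bytes == 0x10000 and
 * nb_bytes == 0x4200; the resulting QCowL2Meta carries cow_start ==
 * [0, 0x1200) and cow_end == [0x4200, 0x10000), the two regions that
 * perform_cow() later fills in around the guest data.
 */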
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = INV_OFFSET;
    *host_offset = INV_OFFSET;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start     += cur_bytes;
        remaining -= cur_bytes;

        if (cluster_offset != INV_OFFSET) {
            cluster_offset += cur_bytes;
        }

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-fight allocation,
         *         set cluster_offset to write to the same cluster and set up
         *         the right synchronisation between the in-flight request and
         *         the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != INV_OFFSET);

    return 0;
}
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of discarded
 * clusters.
 */
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                               uint64_t nb_clusters,
                               enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(bs, old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (!full_discard && s->qcow_version >= 3) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_slice[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 slice is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
                                      full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of zeroed
 * clusters.
 */
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                            uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(bs, old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}
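/*
 * Two update forms are used above: replacing the entry with a plain
 * QCOW_OFLAG_ZERO drops the cluster mapping entirely (and the backing
 * allocation is freed), whereas OR-ing the flag into an existing entry
 * keeps the allocated cluster so a later write needs no new allocation.
 */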
int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* If we have to stay in sync with an external data file, zero out
     * s->data_file first. */
    if (data_file_is_raw(bs)) {
        assert(has_data_file(bs));
        ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
    }

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 slice is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int ret;
    int i, j;

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
        if (l2_slice == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (slice = 0; slice < n_slices; slice++) {
            uint64_t slice_offset = l2_offset + slice * slice_size2;
            bool l2_dirty = false;
            if (is_active_l1) {
                /* get active L2 tables from cache */
                ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
                                      (void **)&l2_slice);
            } else {
                /* load inactive L2 tables from disk */
                ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
            }
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_slice_size; j++) {
                uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
                int64_t offset = l2_entry & L2E_OFFSET_MASK;
                QCow2ClusterType cluster_type =
                    qcow2_get_cluster_type(bs, l2_entry);

                if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                    cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                    continue;
                }

                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    if (!bs->backing) {
                        /* not backed; therefore we can simply deallocate the
                         * cluster */
                        l2_slice[j] = 0;
                        l2_dirty = true;
                        continue;
                    }

                    offset = qcow2_alloc_clusters(bs, s->cluster_size);
                    if (offset < 0) {
                        ret = offset;
                        goto fail;
                    }

                    if (l2_refcount > 1) {
                        /* For shared L2 tables, set the refcount accordingly
                         * (it is already 1 and needs to be l2_refcount) */
                        ret = qcow2_update_cluster_refcount(
                                bs, offset >> s->cluster_bits,
                                refcount_diff(1, l2_refcount), false,
                                QCOW2_DISCARD_OTHER);
                        if (ret < 0) {
                            qcow2_free_clusters(bs, offset, s->cluster_size,
                                                QCOW2_DISCARD_OTHER);
                            goto fail;
                        }
                    }
                }

                if (offset_into_cluster(s, offset)) {
                    int l2_index = slice * s->l2_slice_size + j;
                    qcow2_signal_corruption(
                        bs, true, -1, -1,
                        "Cluster allocation offset "
                        "%#" PRIx64 " unaligned (L2 offset: %#"
                        PRIx64 ", L2 index: %#x)", offset,
                        l2_offset, l2_index);
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    ret = -EIO;
                    goto fail;
                }

                ret = qcow2_pre_write_overlap_check(bs, 0, offset,
                                                    s->cluster_size, true);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                ret = bdrv_pwrite_zeroes(s->data_file, offset,
                                         s->cluster_size, 0);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                if (l2_refcount == 1) {
                    l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
                } else {
                    l2_slice[j] = cpu_to_be64(offset);
                }
                l2_dirty = true;
            }

            if (is_active_l1) {
                if (l2_dirty) {
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
                    qcow2_cache_depends_on_flush(s->l2_table_cache);
                }
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            } else {
                if (l2_dirty) {
                    ret = qcow2_pre_write_overlap_check(
                        bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
                        slice_offset, slice_size2, false);
                    if (ret < 0) {
                        goto fail;
                    }

                    ret = bdrv_pwrite(bs->file, slice_offset,
                                      l2_slice, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_slice) {
        if (!is_active_l1) {
            qemu_vfree(l2_slice);
        } else {
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        }
    }
    return ret;
}
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_size2;
        uint64_t *new_l1_table;
        Error *local_err = NULL;

        ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
                                   s->snapshots[i].l1_size, sizeof(uint64_t),
                                   QCOW_MAX_L1_SIZE, "Snapshot L1 table",
                                   &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto fail;
        }

        l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
        new_l1_table = g_try_realloc(l1_table, l1_size2);

        if (!new_l1_table) {
            ret = -ENOMEM;
            goto fail;
        }

        l1_table = new_l1_table;

        ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset,
                         l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}