/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"
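/*
 * qcow2_grow_l1_table
 *
 * Grows the L1 table to hold at least min_size entries. The new table is
 * written into freshly allocated clusters before the header is pointed at
 * it, and the old table is freed only after the switch, so an interrupted
 * grow leaves the image consistent.
 */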
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif
    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
                                        new_l1_table_offset, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    /* the L1 table is stored big-endian on disk */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table,
                           new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set the new table in the header: l1_size followed by l1_table_offset */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data,
                           sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    /* switch to the new table */
    g_free(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;

fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno if the read from the image file failed;
 * on success, *l2_table points to the loaded L2 table.
 */
static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                          (void **) l2_table);

    return ret;
}
/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
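/* With 8-byte entries, one 512-byte sector holds 64 L1 entries; updating,
 * say, entry 70 therefore rewrites the whole sector covering entries
 * 64..127. */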
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    /* round down to the start of the sector containing l1_index */
    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs,
            QCOW2_OL_DEFAULT & ~QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset,
                                (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t *old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require different handling.)
 */
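/* For example, with stop_flags == QCOW_OFLAG_ZERO, a run of normal clusters
 * ends at the first entry whose zero flag differs from the first entry's, or
 * whose host offset is not exactly cluster_size bytes after its
 * predecessor's. */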
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset)
        return 0;

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
static int count_contiguous_free_clusters(uint64_t nb_clusters,
                                          uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}
/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
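/* Each 512-byte sector is encrypted independently with AES-CBC; the IV is
 * the little-endian sector number padded with zeroes, which is what makes
 * the scheme cryptoloop-compatible. */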
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);
    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
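/* A minimal sketch of a typical caller (hypothetical, for illustration
 * only):
 *
 *     uint64_t host_offset;
 *     int nb = nb_sectors;
 *     int type = qcow2_get_cluster_offset(bs, sector_num << 9, &nb,
 *                                         &host_offset);
 *     // type is a QCOW2_CLUSTER_* value (or -errno); nb has been clamped
 *     // to the contiguous run starting at sector_num.
 */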
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }
    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);
    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
            return -EIO;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }
    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return ret;
}
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 */
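/* The L2 entry built here packs the host byte offset of the compressed data
 * in its low bits and, starting at csize_shift, the number of additional
 * 512-byte sectors the data spans; QCOW_OFLAG_COMPRESSED marks the entry.
 * qcow2_decompress_cluster() below reverses this encoding. */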
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with a
         * pointer to its cluster; the second one has to do RMW (which is done
         * above by copy_sectors()), update the L2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}
/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
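/* For example, if a running allocation is performing COW for bytes
 * [64k, 128k) and the current request starts at 32k, *cur_bytes is cut
 * down to 32k so that the request ends exactly where the dependency
 * begins. */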
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }
    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
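    /* Worked example (assuming 64k clusters): a 100k write starting 4k into
     * the first newly allocated cluster, with two clusters allocated, gives
     * requested_sectors = (100k + 4k) / 512 = 208, avail_sectors = 2 * 128 =
     * 256, alloc_n_start = 8 and nb_sectors = 208; the COW regions then
     * cover sectors [0, 8) at the front and [208, 256) at the tail. */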
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;
fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    assert(n_start * BDRV_SECTOR_SIZE == offset_into_cluster(s, offset));
    offset = start_of_cluster(s, offset);

again:
    start = offset + (n_start << BDRV_SECTOR_BITS);
    remaining = (n_end - n_start) << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    ret = 0;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;
        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }
        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num = (n_end - n_start) - (remaining >> BDRV_SECTOR_BITS);
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;
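    /* qcow2 stores raw deflate streams without a zlib header; a negative
     * windowBits value selects raw mode in zlib, and 12 matches the 4 KB
     * window used when the clusters were compressed. */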
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        if ((old_offset & L2E_OFFSET_MASK) == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1, type);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}
int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}
int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * expanded_clusters is a bitmap where every bit corresponds to one cluster in
 * the image file; a bit gets set if the corresponding cluster has been used for
 * zero expansion (i.e., has been filled with zeroes and is referenced from an
 * L2 table). nb_clusters contains the total cluster count of the image file,
 * i.e., the number of bits in expanded_clusters.
 */
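/* Bitmap indexing example: cluster index 42 lives in byte 42 / 8 == 5 of
 * *expanded_clusters, at bit 42 % 8 == 2; this is how the loops below test
 * and set entries. */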
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, uint8_t **expanded_clusters,
                                      uint64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_blockalign(bs, s->cluster_size);
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            /* unallocated */
            continue;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                    (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                    (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK, cluster_index;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type == QCOW2_CLUSTER_NORMAL) {
                cluster_index = offset >> s->cluster_bits;
                assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
                if ((*expanded_clusters)[cluster_index / 8] &
                    (1 << (cluster_index % 8))) {
                    /* Probably a shared L2 table; this cluster was a zero
                     * cluster which has been expanded, its refcount
                     * therefore most likely requires an update. */
                    ret = qcow2_update_cluster_refcount(bs, cluster_index, 1,
                                                        QCOW2_DISCARD_NEVER);
                    if (ret < 0) {
                        goto fail;
                    }
                    /* Since we just increased the refcount, the COPIED flag may
                     * no longer be set. */
                    l2_table[j] = cpu_to_be64(l2_entry & ~QCOW_OFLAG_COPIED);
                    l2_dirty = true;
                }
                continue;
            } else if (qcow2_get_cluster_type(l2_entry) != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing_hd) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }
            }

            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
                                                offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            l2_dirty = true;

            cluster_index = offset >> s->cluster_bits;

            if (cluster_index >= *nb_clusters) {
                uint64_t old_bitmap_size = (*nb_clusters + 7) / 8;
                uint64_t new_bitmap_size;
                /* The offset may lie beyond the old end of the underlying image
                 * file for growable files only */
                assert(bs->file->growable);
                *nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                                BDRV_SECTOR_SIZE);
                new_bitmap_size = (*nb_clusters + 7) / 8;
                *expanded_clusters = g_realloc(*expanded_clusters,
                                               new_bitmap_size);
                /* clear the newly allocated space */
                memset(&(*expanded_clusters)[old_bitmap_size], 0,
                       new_bitmap_size - old_bitmap_size);
            }

            assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
            (*expanded_clusters)[cluster_index / 8] |= 1 << (cluster_index % 8);
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            ret = qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            if (ret < 0) {
                l2_table = NULL;
                goto fail;
            }
        } else if (l2_dirty) {
            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT &
                    ~(QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2), l2_offset,
                    s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                             (void *)l2_table, s->cluster_sectors);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else if (ret < 0) {
            qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
        } else {
            ret = qcow2_cache_put(bs, s->l2_table_cache,
                                  (void **)&l2_table);
        }
    }
    return ret;
}
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL;
    uint64_t nb_clusters;
    uint8_t *expanded_clusters;
    int ret;
    int i, j;

    nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                   BDRV_SECTOR_SIZE);
    expanded_clusters = g_malloc0((nb_clusters + 7) / 8);

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &expanded_clusters, &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
                BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &expanded_clusters, &nb_clusters);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(expanded_clusters);
    g_free(l1_table);
    return ret;
}