/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

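    /*
     * Ordering note (a sketch of the crash-safety argument, not from the
     * original comments): the tail of the on-disk L1 table is zeroed and
     * flushed *before* the now-unreferenced L2 tables are freed, so a crash
     * in between can at worst leak clusters, but never leaves the L1 table
     * pointing at freed (and possibly reused) L2 clusters.
     */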
    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                             new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If writing the L1 table failed, the image may contain a partially
     * overwritten L1 table. In that case it is better to clear the L1 table
     * in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }
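    /*
     * Illustration (not in the original source): the table grows by roughly
     * 1.5x per step, so starting from one entry the sizes go
     * 1 -> 2 -> 3 -> 5 -> 8 -> 12 -> 18 -> ..., which keeps the number of
     * on-disk table relocations logarithmic in the final L1 size.
     */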

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

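    /*
     * Explanatory note (added): writing the new l1_size and l1_table_offset
     * into the image header below is effectively the commit point; until
     * then the old L1 table remains authoritative, so a failure at any
     * earlier step leaves a consistent image, at worst leaking the freshly
     * allocated clusters.
     */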
    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, or a negative errno value if the read from the
 * image file failed; on success, *l2_table points to the L2 table.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcow2State *s = bs->opaque;

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                           (void **)l2_table);
}

/*
 * Writes one sector of the L1 table to the disk (we can't update single
 * entries, and we really don't want the block layer to perform a
 * read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
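/* = 64: each 512-byte sector holds 64 big-endian 8-byte L1 entries */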
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
                                     uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    /* must be allocated */
    first_cluster_type = qcow2_get_cluster_type(first_entry);
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

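    /*
     * Explanatory comment (not in the original source): entry i matches only
     * if its masked bits equal the first entry's flags plus an offset exactly
     * i clusters further on, so the single comparison below checks offset
     * contiguity and any change in a stop flag at the same time.
     */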
    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

/*
 * Checks how many consecutive unallocated clusters in a given L2
 * table have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(int nb_clusters,
                                                 uint64_t *l2_table,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_table[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_preadv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
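        /*
         * Note (added): the sector number used for the IV below is derived
         * from either the host (physical) cluster offset or the guest
         * (virtual) offset, depending on s->crypt_physical_offset, which is
         * determined by the image's encryption format.
         */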
        int64_t sector = (s->crypt_physical_offset ?
                          (cluster_offset + offset_in_cluster) :
                          (src_cluster_offset + offset_in_cluster))
                         >> BDRV_SECTOR_BITS;
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        assert(s->crypto);
        if (qcrypto_block_encrypt(s->crypto, sector, buffer,
                                  bytes, NULL) < 0) {
            return false;
        }
    }
    return true;
}

static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}


/*
 * qcow2_get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l1 entry */
    bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1))
                    + offset_in_cluster;
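    /*
     * Worked example (added for illustration, default 64 KiB clusters):
     * cluster_bits = 16 and l2_bits = 13 give l1_bits = 29, so each L1
     * entry covers 2^29 = 512 MiB of guest address space and the lookup
     * below never crosses more than one L2 table.
     */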

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(*cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(nb_clusters,
                                                  &l2_table[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno on failure
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * qcow2_alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful; returns 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);
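    /*
     * Explanatory note (not in the original): nb_csectors counts the
     * *additional* 512-byte sectors the compressed data spans beyond its
     * first sector; it is packed into the L2 entry above the offset bits
     * (at s->csize_shift), matching how qcow2_decompress_cluster() below
     * unpacks it with "+ 1".
     */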

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return cluster_offset;
}

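/*
 * Copy-on-write helper (descriptive header added; perform_cow() had no
 * comment of its own): reads the untouched head and tail of the cluster
 * range described by @m, optionally encrypts them, and writes them to the
 * newly allocated clusters, so the guest data written in between stays
 * consistent with the old contents.
 */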
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);
    assert(!m->data_qiov || m->data_qiov->size == data_bytes);

    if (start->nb_bytes == 0 && end->nb_bytes == 0) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (done above
         * by perform_cow()), update the L2 table with its cluster pointer,
         * and free the old cluster. That is what this loop does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many already allocated clusters that don't require a copy-on-
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = 0;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (!*host_offset ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        int preallocated_nb_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    if (!alloc_cluster_offset) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        /* !*host_offset would overwrite the image header and is reserved for
         * "no host offset preferred". If 0 was a valid host offset, it'd
         * trigger the following overlap check; do that now to avoid having an
         * invalid value in *host_offset. */
        if (!alloc_cluster_offset) {
            ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                                nb_clusters * s->cluster_size);
            assert(ret < 0);
            goto fail;
        }
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
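    /*
     * Illustrative note (added): cow_start covers the gap between the first
     * cluster boundary and the unaligned start of the guest write, while
     * cow_end covers everything from the end of the written area up to the
     * end of the last newly allocated cluster; both offsets are relative to
     * the start of the first allocated cluster.
     */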
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * qcow2_alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *    a) Overlap not in the first cluster -> shorten this request and
         *       let the caller handle the rest in its next loop iteration.
         *
         *    b) Real overlaps of two requests. Yield and restart the search
         *       for contiguous clusters (the situation could have changed
         *       while we were sleeping)
         *
         *    c) TODO: Request starts in the same cluster as the in-flight
         *       allocation ends. Shorten the COW of the in-flight allocation,
         *       set cluster_offset to write to the same cluster and set up
         *       the right synchronisation between the in-flight request and
         *       the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != 0);

    return 0;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

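    /* Descriptive comment (added): negative windowBits selects a raw deflate
     * stream without the zlib header/trailer, and 12 means a 4 KiB window,
     * matching the parameters qcow2 uses when compressing cluster data. */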
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
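        /* (Added note) coffset is a byte offset into the image file; the
         * compressed data starts at sector_offset within its first sector
         * and the descriptor spans nb_csectors sectors, so csize is the
         * number of bytes from the start of the compressed data to the end
         * of the last sector it occupies. */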
3e4c7052
SH
1569
1570 /* Allocate buffers on first decompress operation, most images are
1571 * uncompressed and the memory overhead can be avoided. The buffers
1572 * are freed in .bdrv_close().
1573 */
1574 if (!s->cluster_data) {
1575 /* one more sector for decompressed data alignment */
1576 s->cluster_data = qemu_try_blockalign(bs->file->bs,
1577 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512);
1578 if (!s->cluster_data) {
1579 return -ENOMEM;
1580 }
1581 }
1582 if (!s->cluster_cache) {
1583 s->cluster_cache = g_malloc(s->cluster_size);
1584 }
1585
66f82cee 1586 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
fbcbbf4e 1587 ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
9a4f4c31 1588 nb_csectors);
45aba42f 1589 if (ret < 0) {
8af36488 1590 return ret;
45aba42f
KW
1591 }
1592 if (decompress_buffer(s->cluster_cache, s->cluster_size,
1593 s->cluster_data + sector_offset, csize) < 0) {
8af36488 1594 return -EIO;
45aba42f
KW
1595 }
1596 s->cluster_cache_offset = coffset;
1597 }
1598 return 0;
1599}
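
The bit manipulation above unpacks a compressed-cluster descriptor. Here is a dependency-free sketch of the same arithmetic, assuming the descriptor layout from the qcow2 specification (with x = 62 - (cluster_bits - 8), bits 0..x-1 hold the host offset and bits x..61 the count of additional 512-byte sectors); the descriptor value below is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const unsigned cluster_bits = 16;                 /* 64 KB clusters */
    const unsigned csize_shift = 62 - (cluster_bits - 8);
    const uint64_t csize_mask = (1ULL << (cluster_bits - 8)) - 1;
    const uint64_t cluster_offset_mask = (1ULL << csize_shift) - 1;

    /* hypothetical descriptor: host offset 0x50201, 3 additional sectors */
    uint64_t l2_entry = (3ULL << csize_shift) | 0x50201ULL;

    uint64_t coffset = l2_entry & cluster_offset_mask;
    unsigned nb_csectors = ((l2_entry >> csize_shift) & csize_mask) + 1;
    unsigned sector_offset = coffset & 511;
    unsigned csize = nb_csectors * 512 - sector_offset;

    printf("host offset %#llx, read %u sectors, compressed size %u bytes\n",
           (unsigned long long)coffset, nb_csectors, csize);
    return 0;
}
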
5ea929e3
KW
1600
1601/*
1602 * This discards as many of the nb_clusters clusters as possible at once
1603 * (i.e. all clusters in the same L2 table) and returns the number of
1604 * discarded clusters.
1605 */
1606static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
b6d36def
HR
1607 uint64_t nb_clusters, enum qcow2_discard_type type,
1608 bool full_discard)
5ea929e3 1609{
ff99129a 1610 BDRVQcow2State *s = bs->opaque;
3948d1d4 1611 uint64_t *l2_table;
5ea929e3
KW
1612 int l2_index;
1613 int ret;
1614 int i;
1615
3948d1d4 1616 ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
5ea929e3
KW
1617 if (ret < 0) {
1618 return ret;
1619 }
1620
1621 /* Limit nb_clusters to one L2 table */
1622 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
b6d36def 1623 assert(nb_clusters <= INT_MAX);
5ea929e3
KW
1624
1625 for (i = 0; i < nb_clusters; i++) {
c883db0d 1626 uint64_t old_l2_entry;
5ea929e3 1627
c883db0d 1628 old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);
a71835a0
KW
1629
1630 /*
808c4b6f
HR
1631 * If full_discard is false, make sure that a discarded area reads back
1632 * as zeroes for v3 images (we cannot do it for v2 without actually
1633 * writing a zero-filled buffer). We can skip the operation if the
1634 * cluster is already marked as zero, or if it's unallocated and we
1635 * don't have a backing file.
a71835a0
KW
1636 *
1637 * TODO We might want to use bdrv_get_block_status(bs) here, but we're
1638 * holding s->lock, so that doesn't work today.
808c4b6f
HR
1639 *
1640 * If full_discard is true, the sector should not read back as zeroes,
1641 * but rather fall through to the backing file.
a71835a0 1642 */
c883db0d 1643 switch (qcow2_get_cluster_type(old_l2_entry)) {
bbd995d8
EB
1644 case QCOW2_CLUSTER_UNALLOCATED:
1645 if (full_discard || !bs->backing) {
1646 continue;
1647 }
1648 break;
1649
fdfab37d
EB
1650 case QCOW2_CLUSTER_ZERO_PLAIN:
1651 if (!full_discard) {
bbd995d8
EB
1652 continue;
1653 }
1654 break;
1655
fdfab37d 1656 case QCOW2_CLUSTER_ZERO_ALLOC:
bbd995d8
EB
1657 case QCOW2_CLUSTER_NORMAL:
1658 case QCOW2_CLUSTER_COMPRESSED:
1659 break;
1660
1661 default:
1662 abort();
5ea929e3
KW
1663 }
1664
1665 /* First remove L2 entries */
72e80b89 1666 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
808c4b6f 1667 if (!full_discard && s->qcow_version >= 3) {
a71835a0
KW
1668 l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
1669 } else {
1670 l2_table[l2_index + i] = cpu_to_be64(0);
1671 }
5ea929e3
KW
1672
1673 /* Then decrease the refcount */
c883db0d 1674 qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
5ea929e3
KW
1675 }
1676
a3f1afb4 1677 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
5ea929e3
KW
1678
1679 return nb_clusters;
1680}
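
The switch above branches on qcow2_get_cluster_type(). A simplified standalone re-implementation of that classification, assuming the usual qcow2 L2 flag bits (compressed in bit 62, copied in bit 63, the zero flag in bit 0, host offsets in the masked middle bits); treat it as a sketch, not the authoritative helper:

#include <stdio.h>
#include <stdint.h>

#define OFLAG_COPIED     (1ULL << 63)
#define OFLAG_COMPRESSED (1ULL << 62)
#define OFLAG_ZERO       (1ULL << 0)
#define OFFSET_MASK      0x00fffffffffffe00ULL

enum cluster_type {
    UNALLOCATED, ZERO_PLAIN, ZERO_ALLOC, NORMAL, COMPRESSED,
};

/* mirror of qcow2_get_cluster_type(), simplified for illustration */
static enum cluster_type get_cluster_type(uint64_t l2_entry)
{
    if (l2_entry & OFLAG_COMPRESSED) {
        return COMPRESSED;
    } else if (l2_entry & OFLAG_ZERO) {
        return (l2_entry & OFFSET_MASK) ? ZERO_ALLOC : ZERO_PLAIN;
    } else if (!(l2_entry & OFFSET_MASK)) {
        return UNALLOCATED;
    } else {
        return NORMAL;
    }
}

int main(void)
{
    const char *names[] = {
        "unallocated", "zero (plain)", "zero (allocated)", "normal",
        "compressed",
    };
    uint64_t samples[] = {
        0,                         /* unallocated */
        OFLAG_ZERO,                /* zero, no host cluster */
        0x50000ULL | OFLAG_ZERO,   /* zero, cluster kept allocated */
        0x50000ULL | OFLAG_COPIED, /* normal, directly writable */
    };
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        printf("%#llx -> %s\n", (unsigned long long)samples[i],
               names[get_cluster_type(samples[i])]);
    }
    return 0;
}
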
1681
d2cb36af
EB
1682int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
1683 uint64_t bytes, enum qcow2_discard_type type,
1684 bool full_discard)
5ea929e3 1685{
ff99129a 1686 BDRVQcow2State *s = bs->opaque;
d2cb36af 1687 uint64_t end_offset = offset + bytes;
b6d36def 1688 uint64_t nb_clusters;
d2cb36af 1689 int64_t cleared;
5ea929e3
KW
1690 int ret;
1691
f10ee139 1692 /* Caller must pass aligned values, except at image end */
0c1bd469 1693 assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
f10ee139
EB
1694 assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
1695 end_offset == bs->total_sectors << BDRV_SECTOR_BITS);
5ea929e3 1696
d2cb36af 1697 nb_clusters = size_to_clusters(s, bytes);
5ea929e3 1698
0b919fae
KW
1699 s->cache_discards = true;
1700
5ea929e3
KW
1701 /* Each L2 table is handled by its own loop iteration */
1702 while (nb_clusters > 0) {
d2cb36af
EB
1703 cleared = discard_single_l2(bs, offset, nb_clusters, type,
1704 full_discard);
1705 if (cleared < 0) {
1706 ret = cleared;
0b919fae 1707 goto fail;
5ea929e3
KW
1708 }
1709
d2cb36af
EB
1710 nb_clusters -= cleared;
1711 offset += (cleared * s->cluster_size);
5ea929e3
KW
1712 }
1713
0b919fae
KW
1714 ret = 0;
1715fail:
1716 s->cache_discards = false;
1717 qcow2_process_discards(bs, ret);
1718
1719 return ret;
5ea929e3 1720}
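
Both the discard loop above and the zeroize loop below rely on the same chunking rule: one L2 table covers l2_size clusters, so each request is clipped at a table boundary and the helper reports how many clusters it handled. A standalone sketch of that walk, with hypothetical sizes:

#include <stdio.h>
#include <stdint.h>

#define CLUSTER_BITS 16                      /* 64 KB clusters */
#define CLUSTER_SIZE (1ULL << CLUSTER_BITS)
#define L2_SIZE      (CLUSTER_SIZE / 8)      /* 8-byte entries per table */

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* like discard_single_l2(): handle at most one L2 table's worth */
static uint64_t handle_single_l2(uint64_t offset, uint64_t nb_clusters)
{
    uint64_t l2_index = (offset >> CLUSTER_BITS) & (L2_SIZE - 1);
    return MIN(nb_clusters, L2_SIZE - l2_index);
}

int main(void)
{
    uint64_t offset = (L2_SIZE - 2) * CLUSTER_SIZE;  /* near a boundary */
    uint64_t nb_clusters = 10;

    while (nb_clusters > 0) {
        uint64_t cleared = handle_single_l2(offset, nb_clusters);
        printf("cleared %llu clusters at %#llx\n",
               (unsigned long long)cleared, (unsigned long long)offset);
        nb_clusters -= cleared;
        offset += cleared * CLUSTER_SIZE;
    }
    return 0;
}
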
621f0589
KW
1721
1722/*
1723 * This zeroes as many of the nb_clusters clusters as possible at once
1724 * (i.e. all clusters in the same L2 table) and returns the number of
1725 * zeroed clusters.
1726 */
1727static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
170f4b2e 1728 uint64_t nb_clusters, int flags)
621f0589 1729{
ff99129a 1730 BDRVQcow2State *s = bs->opaque;
621f0589
KW
1731 uint64_t *l2_table;
1732 int l2_index;
1733 int ret;
1734 int i;
06cc5e2b 1735 bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);
621f0589
KW
1736
1737 ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
1738 if (ret < 0) {
1739 return ret;
1740 }
1741
1742 /* Limit nb_clusters to one L2 table */
1743 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
b6d36def 1744 assert(nb_clusters <= INT_MAX);
621f0589
KW
1745
1746 for (i = 0; i < nb_clusters; i++) {
1747 uint64_t old_offset;
06cc5e2b 1748 QCow2ClusterType cluster_type;
621f0589
KW
1749
1750 old_offset = be64_to_cpu(l2_table[l2_index + i]);
1751
06cc5e2b
EB
1752 /*
1753 * Minimize L2 changes if the cluster already reads back as
1754 * zeroes with correct allocation.
1755 */
1756 cluster_type = qcow2_get_cluster_type(old_offset);
1757 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
1758 (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
1759 continue;
1760 }
1761
72e80b89 1762 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
06cc5e2b 1763 if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
621f0589 1764 l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
6cfcb9b8 1765 qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
621f0589
KW
1766 } else {
1767 l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
1768 }
1769 }
1770
a3f1afb4 1771 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
621f0589
KW
1772
1773 return nb_clusters;
1774}
1775
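The two branches in zero_single_l2() above differ in what survives: replacing the entry with just the zero flag drops the host mapping entirely (a plain zero cluster), while OR-ing the flag into an allocated entry keeps the host cluster reserved. A tiny standalone illustration, under the same flag-bit assumptions as the classifier sketch earlier:

#include <stdio.h>
#include <stdint.h>

#define OFLAG_ZERO  (1ULL << 0)
#define OFFSET_MASK 0x00fffffffffffe00ULL

int main(void)
{
    uint64_t entry = 0x50000ULL;   /* hypothetical allocated cluster */

    /* with BDRV_REQ_MAY_UNMAP: drop the mapping, free the host cluster */
    uint64_t unmapped = OFLAG_ZERO;

    /* without MAY_UNMAP: keep the host cluster, just mark it zero */
    uint64_t kept = entry | OFLAG_ZERO;

    printf("unmap: offset %#llx (freed)\n",
           (unsigned long long)(unmapped & OFFSET_MASK));
    printf("keep:  offset %#llx (still allocated)\n",
           (unsigned long long)(kept & OFFSET_MASK));
    return 0;
}
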
d2cb36af
EB
1776int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
1777 uint64_t bytes, int flags)
621f0589 1778{
ff99129a 1779 BDRVQcow2State *s = bs->opaque;
d2cb36af 1780 uint64_t end_offset = offset + bytes;
b6d36def 1781 uint64_t nb_clusters;
d2cb36af 1782 int64_t cleared;
621f0589
KW
1783 int ret;
1784
f10ee139
EB
1785 /* Caller must pass aligned values, except at image end */
1786 assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
1787 assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
1788 end_offset == bs->total_sectors << BDRV_SECTOR_BITS);
1789
621f0589
KW
1790 /* The zero flag is only supported by version 3 and newer */
1791 if (s->qcow_version < 3) {
1792 return -ENOTSUP;
1793 }
1794
1795 /* Each L2 table is handled by its own loop iteration */
d2cb36af 1796 nb_clusters = size_to_clusters(s, bytes);
621f0589 1797
0b919fae
KW
1798 s->cache_discards = true;
1799
621f0589 1800 while (nb_clusters > 0) {
d2cb36af
EB
1801 cleared = zero_single_l2(bs, offset, nb_clusters, flags);
1802 if (cleared < 0) {
1803 ret = cleared;
0b919fae 1804 goto fail;
621f0589
KW
1805 }
1806
d2cb36af
EB
1807 nb_clusters -= cleared;
1808 offset += (cleared * s->cluster_size);
621f0589
KW
1809 }
1810
0b919fae
KW
1811 ret = 0;
1812fail:
1813 s->cache_discards = false;
1814 qcow2_process_discards(bs, ret);
1815
1816 return ret;
621f0589 1817}
32b6444d
HR
1818
1819/*
1820 * Expands all zero clusters in a specific L1 table (or deallocates them, for
1821 * non-backed non-pre-allocated zero clusters).
1822 *
4057a2b2
HR
1823 * l1_entries and *visited_l1_entries are used to keep track of progress for
1824 * status_cb(). l1_entries contains the total number of L1 entries and
1825 * *visited_l1_entries counts all visited L1 entries.
32b6444d
HR
1826 */
1827static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
ecf58777 1828 int l1_size, int64_t *visited_l1_entries,
4057a2b2 1829 int64_t l1_entries,
8b13976d
HR
1830 BlockDriverAmendStatusCB *status_cb,
1831 void *cb_opaque)
32b6444d 1832{
ff99129a 1833 BDRVQcow2State *s = bs->opaque;
32b6444d
HR
1834 bool is_active_l1 = (l1_table == s->l1_table);
1835 uint64_t *l2_table = NULL;
1836 int ret;
1837 int i, j;
1838
1839 if (!is_active_l1) {
1840 /* inactive L2 tables require a local buffer to load them into when
1841 * reading them from disk */
9a4f4c31 1842 l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
de82815d
KW
1843 if (l2_table == NULL) {
1844 return -ENOMEM;
1845 }
32b6444d
HR
1846 }
1847
1848 for (i = 0; i < l1_size; i++) {
1849 uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
1850 bool l2_dirty = false;
0e06528e 1851 uint64_t l2_refcount;
32b6444d
HR
1852
1853 if (!l2_offset) {
1854 /* unallocated */
4057a2b2
HR
1855 (*visited_l1_entries)++;
1856 if (status_cb) {
8b13976d 1857 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
4057a2b2 1858 }
32b6444d
HR
1859 continue;
1860 }
1861
8dd93d93
HR
1862 if (offset_into_cluster(s, l2_offset)) {
1863 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
1864 PRIx64 " unaligned (L1 index: %#x)",
1865 l2_offset, i);
1866 ret = -EIO;
1867 goto fail;
1868 }
1869
32b6444d
HR
1870 if (is_active_l1) {
1871 /* get active L2 tables from cache */
1872 ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
1873 (void **)&l2_table);
1874 } else {
1875 /* load inactive L2 tables from disk */
fbcbbf4e 1876 ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
9a4f4c31 1877 (void *)l2_table, s->cluster_sectors);
32b6444d
HR
1878 }
1879 if (ret < 0) {
1880 goto fail;
1881 }
1882
7324c10f
HR
1883 ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
1884 &l2_refcount);
1885 if (ret < 0) {
ecf58777
HR
1886 goto fail;
1887 }
1888
32b6444d
HR
1889 for (j = 0; j < s->l2_size; j++) {
1890 uint64_t l2_entry = be64_to_cpu(l2_table[j]);
ecf58777 1891 int64_t offset = l2_entry & L2E_OFFSET_MASK;
3ef95218 1892 QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);
32b6444d 1893
fdfab37d
EB
1894 if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
1895 cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
32b6444d
HR
1896 continue;
1897 }
1898
fdfab37d 1899 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
760e0063 1900 if (!bs->backing) {
32b6444d
HR
1901 /* not backed; therefore we can simply deallocate the
1902 * cluster */
1903 l2_table[j] = 0;
1904 l2_dirty = true;
1905 continue;
1906 }
1907
1908 offset = qcow2_alloc_clusters(bs, s->cluster_size);
1909 if (offset < 0) {
1910 ret = offset;
1911 goto fail;
1912 }
ecf58777
HR
1913
1914 if (l2_refcount > 1) {
1915 /* For shared L2 tables, set the refcount accordingly (it is
1916 * already 1 and needs to be l2_refcount) */
1917 ret = qcow2_update_cluster_refcount(bs,
2aabe7c7
HR
1918 offset >> s->cluster_bits,
1919 refcount_diff(1, l2_refcount), false,
ecf58777
HR
1920 QCOW2_DISCARD_OTHER);
1921 if (ret < 0) {
1922 qcow2_free_clusters(bs, offset, s->cluster_size,
1923 QCOW2_DISCARD_OTHER);
1924 goto fail;
1925 }
1926 }
32b6444d
HR
1927 }
1928
8dd93d93 1929 if (offset_into_cluster(s, offset)) {
bcb07dba
EB
1930 qcow2_signal_corruption(bs, true, -1, -1,
1931 "Cluster allocation offset "
8dd93d93
HR
1932 "%#" PRIx64 " unaligned (L2 offset: %#"
1933 PRIx64 ", L2 index: %#x)", offset,
1934 l2_offset, j);
fdfab37d 1935 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
8dd93d93
HR
1936 qcow2_free_clusters(bs, offset, s->cluster_size,
1937 QCOW2_DISCARD_ALWAYS);
1938 }
1939 ret = -EIO;
1940 goto fail;
1941 }
1942
231bb267 1943 ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
32b6444d 1944 if (ret < 0) {
fdfab37d 1945 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
320c7066
HR
1946 qcow2_free_clusters(bs, offset, s->cluster_size,
1947 QCOW2_DISCARD_ALWAYS);
1948 }
32b6444d
HR
1949 goto fail;
1950 }
1951
720ff280 1952 ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
32b6444d 1953 if (ret < 0) {
fdfab37d 1954 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
320c7066
HR
1955 qcow2_free_clusters(bs, offset, s->cluster_size,
1956 QCOW2_DISCARD_ALWAYS);
1957 }
32b6444d
HR
1958 goto fail;
1959 }
1960
ecf58777
HR
1961 if (l2_refcount == 1) {
1962 l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
1963 } else {
1964 l2_table[j] = cpu_to_be64(offset);
e390cf5a 1965 }
ecf58777 1966 l2_dirty = true;
32b6444d
HR
1967 }
1968
1969 if (is_active_l1) {
1970 if (l2_dirty) {
72e80b89 1971 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
32b6444d
HR
1972 qcow2_cache_depends_on_flush(s->l2_table_cache);
1973 }
a3f1afb4 1974 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
32b6444d
HR
1975 } else {
1976 if (l2_dirty) {
231bb267
HR
1977 ret = qcow2_pre_write_overlap_check(bs,
1978 QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
32b6444d
HR
1979 s->cluster_size);
1980 if (ret < 0) {
1981 goto fail;
1982 }
1983
18d51c4b 1984 ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
9a4f4c31 1985 (void *)l2_table, s->cluster_sectors);
32b6444d
HR
1986 if (ret < 0) {
1987 goto fail;
1988 }
1989 }
1990 }
4057a2b2
HR
1991
1992 (*visited_l1_entries)++;
1993 if (status_cb) {
8b13976d 1994 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
4057a2b2 1995 }
32b6444d
HR
1996 }
1997
1998 ret = 0;
1999
2000fail:
2001 if (l2_table) {
2002 if (!is_active_l1) {
2003 qemu_vfree(l2_table);
2004 } else {
a3f1afb4 2005 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
32b6444d
HR
2006 }
2007 }
2008 return ret;
2009}
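
Condensed, the per-entry policy implemented above: a plain zero cluster in a non-backed image can simply be dropped; a plain zero cluster in a backed image gets a freshly allocated, zero-filled host cluster; a preallocated zero cluster is zeroed in place. COPIED is set only when the L2 table is not shared. A standalone sketch of that decision table, with hypothetical names:

#include <stdio.h>
#include <stdbool.h>

/* what expand_zero_clusters_in_l1() does with one zero cluster */
static const char *expand_action(bool zero_plain, bool has_backing,
                                 unsigned l2_refcount)
{
    if (zero_plain && !has_backing) {
        return "deallocate: entry := 0";
    }
    if (zero_plain) {
        return l2_refcount == 1
               ? "allocate, write zeroes, entry := offset|COPIED"
               : "allocate, raise refcount, write zeroes, entry := offset";
    }
    /* ZERO_ALLOC: a host cluster is already reserved */
    return l2_refcount == 1
           ? "write zeroes in place, entry := offset|COPIED"
           : "write zeroes in place, entry := offset";
}

int main(void)
{
    printf("plain, no backing:  %s\n", expand_action(true,  false, 1));
    printf("plain, backed:      %s\n", expand_action(true,  true,  1));
    printf("prealloc, shared:   %s\n", expand_action(false, true,  2));
    return 0;
}
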
2010
2011/*
2012 * For backed images, expands all zero clusters on the image. For non-backed
2013 * images, deallocates all non-pre-allocated zero clusters (and claims the
2014 * allocation for pre-allocated ones). This is important for downgrading to a
2015 * qcow2 version which doesn't yet support metadata zero clusters.
2016 */
4057a2b2 2017int qcow2_expand_zero_clusters(BlockDriverState *bs,
8b13976d
HR
2018 BlockDriverAmendStatusCB *status_cb,
2019 void *cb_opaque)
32b6444d 2020{
ff99129a 2021 BDRVQcow2State *s = bs->opaque;
32b6444d 2022 uint64_t *l1_table = NULL;
4057a2b2 2023 int64_t l1_entries = 0, visited_l1_entries = 0;
32b6444d
HR
2024 int ret;
2025 int i, j;
2026
4057a2b2
HR
2027 if (status_cb) {
2028 l1_entries = s->l1_size;
2029 for (i = 0; i < s->nb_snapshots; i++) {
2030 l1_entries += s->snapshots[i].l1_size;
2031 }
2032 }
2033
32b6444d 2034 ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
4057a2b2 2035 &visited_l1_entries, l1_entries,
8b13976d 2036 status_cb, cb_opaque);
32b6444d
HR
2037 if (ret < 0) {
2038 goto fail;
2039 }
2040
2041 /* Inactive L1 tables may point to active L2 tables - therefore it is
2042 * necessary to flush the L2 table cache before trying to access the L2
2043 * tables pointed to by inactive L1 entries (else we might try to expand
2044 * zero clusters that have already been expanded); furthermore, it is also
2045 * necessary to empty the L2 table cache, since it may contain tables which
2046 * are now going to be modified directly on disk, bypassing the cache.
2047 * qcow2_cache_empty() does both for us. */
2048 ret = qcow2_cache_empty(bs, s->l2_table_cache);
2049 if (ret < 0) {
2050 goto fail;
2051 }
2052
2053 for (i = 0; i < s->nb_snapshots; i++) {
d737b78c
LV
2054 int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
2055 sizeof(uint64_t), BDRV_SECTOR_SIZE);
32b6444d
HR
2056
2057 l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);
2058
fbcbbf4e 2059 ret = bdrv_read(bs->file,
9a4f4c31
KW
2060 s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
2061 (void *)l1_table, l1_sectors);
32b6444d
HR
2062 if (ret < 0) {
2063 goto fail;
2064 }
2065
2066 for (j = 0; j < s->snapshots[i].l1_size; j++) {
2067 be64_to_cpus(&l1_table[j]);
2068 }
2069
2070 ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
4057a2b2 2071 &visited_l1_entries, l1_entries,
8b13976d 2072 status_cb, cb_opaque);
32b6444d
HR
2073 if (ret < 0) {
2074 goto fail;
2075 }
2076 }
2077
2078 ret = 0;
2079
2080fail:
32b6444d
HR
2081 g_free(l1_table);
2082 return ret;
2083}
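
All qcow2 on-disk tables are big-endian, which is why each snapshot L1 entry is byte-swapped with be64_to_cpus() after bdrv_read() above. A dependency-free sketch of that conversion (qemu/bswap.h provides the real helpers; the entry value below is made up):

#include <stdio.h>
#include <stdint.h>

/* portable big-endian -> host conversion, independent of qemu/bswap.h */
static uint64_t be64_to_host(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v = (v << 8) | p[i];
    }
    return v;
}

int main(void)
{
    /* a hypothetical on-disk L1 entry: offset 0x50000 with bit 63 set */
    const uint8_t disk[8] = { 0x80, 0, 0, 0, 0, 0x05, 0, 0 };
    printf("host value: %#llx\n", (unsigned long long)be64_to_host(disk));
    return 0;
}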