/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"
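
/*
 * qcow2_grow_l1_table (editorial summary; the function below had no doc
 * comment)
 *
 * Grows the L1 table so that it has room for at least min_size entries.
 * If exact_size is false, the new size is rounded up by repeatedly applying
 * a 3/2 growth factor to reduce the number of future grow operations (for
 * example, starting from 1 the sizes are 1, 2, 3, 5, 8, 12, ...).
 *
 * Returns 0 on success, -EFBIG if the resulting table would exceed INT_MAX
 * entries, or a negative error code if allocating or writing the new table
 * fails.
 */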
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
                                        new_l1_table_offset, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success and stores a pointer to the L2 table in *l2_table;
 * returns a negative error code if the read from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}
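
/*
 * Note: the reference returned through *l2_table comes from the L2 table
 * cache; callers in this file release it again with qcow2_cache_put() once
 * they are done with the table.
 */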

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs,
            QCOW2_OL_DEFAULT & ~QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
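
/*
 * Illustration (editorial): with 8-byte entries, one 512-byte sector holds
 * 64 L1 entries, so l1_start_index is l1_index rounded down to a multiple of
 * 64. For l1_index == 70, entries 64..127 are converted to big-endian and
 * written back as a single sector.
 */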

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}
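
/*
 * Note on ordering: the new L2 table is written and flushed before the L1
 * entry is updated, so a crash in between leaves the L1 entry pointing at
 * the old, still valid table. On any failure the in-memory L1 entry is
 * restored to old_l2_offset.
 */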

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW2_CLUSTER_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset)
        return 0;

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
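
/*
 * Example (editorial): with a 64k cluster size and L2 entries pointing at
 * host offsets 0x50000, 0x60000 and 0x80000 (all with identical flags), the
 * run stops after two clusters because the third entry is not equal to
 * first_offset + 2 * cluster_size. Passing QCOW_OFLAG_ZERO in stop_flags
 * additionally ends the run at the first entry whose zero flag differs from
 * the first entry's.
 */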

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
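
/*
 * Each 512-byte sector is processed as an independent AES-CBC unit: the IV
 * is the 64-bit little-endian sector number in the low 8 bytes of the
 * 16-byte block, with the high 8 bytes zeroed. 'enc' selects encryption or
 * decryption as defined by the AES API.
 */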

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                iov.iov_base, iov.iov_base, n, 1,
                &s->aes_encrypt_key);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
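
/*
 * copy_sectors() performs the actual copy-on-write data movement: it reads
 * the guest-visible data for the given sector range (which may come from a
 * backing file) and writes it into the newly allocated cluster, encrypting
 * it first if the image uses encryption. In the callers below, n_start and
 * n_end are sector offsets relative to the cluster-aligned start of the
 * allocated area.
 */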


/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            return -EIO;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful; return 0 otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with a
         * pointer to its cluster; the second one has to do RMW (which is done
         * above by copy_sectors()), update the L2 table with its cluster
         * pointer and free the old cluster. This is what this loop does */
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
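
/*
 * The overlap test above treats both the current request and each in-flight
 * allocation as half-open byte ranges: [start, end) and [old_start, old_end)
 * are disjoint exactly when end <= old_start or start >= old_end. If the
 * overlap begins beyond our own start, the request is merely shortened;
 * otherwise the coroutine waits on the older request's queue.
 */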

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write are present at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
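
/*
 * QCOW_OFLAG_COPIED marks clusters whose refcount is exactly 1, so they can
 * be overwritten in place without copy-on-write; that is why only such
 * clusters are counted here and may be written to directly by the caller.
 */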

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
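
/*
 * Each successful handle_alloc() call prepends a new QCowL2Meta to the list
 * passed in via *m (note the .next = old_m assignment above), so a single
 * guest write can carry several independent allocations. The cow_start and
 * cow_end regions describe the parts of the allocated area that still need
 * to be filled by copy-on-write before the L2 table may be updated.
 */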

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    assert(n_start * BDRV_SECTOR_SIZE == offset_into_cluster(s, offset));
    offset = start_of_cluster(s, offset);

again:
    start = offset + (n_start << BDRV_SECTOR_BITS);
    remaining = (n_end - n_start) << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight allocation,
         *         set cluster_offset to write to the same cluster and set up
         *         the right synchronisation between the in-flight request and
         *         the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num = (n_end - n_start) - (remaining >> BDRV_SECTOR_BITS);
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
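
/*
 * inflateInit2() is called with windowBits == -12: the negative value selects
 * a raw deflate stream (no zlib header or checksum), matching how qcow2
 * compresses clusters, and 12 limits the window to 4 KB.
 */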

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        if ((old_offset & L2E_OFFSET_MASK) == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1, type);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}
1471 | ||
1472 | int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors) | |
1473 | { | |
1474 | BDRVQcowState *s = bs->opaque; | |
1475 | unsigned int nb_clusters; | |
1476 | int ret; | |
1477 | ||
1478 | /* The zero flag is only supported by version 3 and newer */ | |
1479 | if (s->qcow_version < 3) { | |
1480 | return -ENOTSUP; | |
1481 | } | |
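/*
 * Version 2 (compat=0.10) images have no zero flag, so callers must handle
 * the -ENOTSUP returned here, typically by falling back to writing literal
 * zeroes instead.
 */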
1482 | ||
1483 | /* Each L2 table is handled by its own loop iteration */ | |
1484 | nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS); | |
1485 | ||
0b919fae KW |
1486 | s->cache_discards = true; |
1487 | ||
621f0589 KW |
1488 | while (nb_clusters > 0) { |
1489 | ret = zero_single_l2(bs, offset, nb_clusters); | |
1490 | if (ret < 0) { | |
0b919fae | 1491 | goto fail; |
621f0589 KW |
1492 | } |
1493 | ||
1494 | nb_clusters -= ret; | |
1495 | offset += (ret * s->cluster_size); | |
1496 | } | |
1497 | ||
0b919fae KW |
1498 | ret = 0; |
1499 | fail: | |
1500 | s->cache_discards = false; | |
1501 | qcow2_process_discards(bs, ret); | |
1502 | ||
1503 | return ret; | |
621f0589 | 1504 | } |
32b6444d HR |
1505 | |
1506 | /* | |
1507 | * Expands all zero clusters in a specific L1 table (or deallocates them, for | |
1508 | * non-backed non-pre-allocated zero clusters). | |
1509 | * | |
1510 | * expanded_clusters is a bitmap where every bit corresponds to one cluster in | |
1511 | * the image file; a bit gets set if the corresponding cluster has been used for | |
1512 | * zero expansion (i.e., has been filled with zeroes and is referenced from an | |
1513 | * L2 table). nb_clusters contains the total cluster count of the image file, | |
1514 | * i.e., the number of bits in expanded_clusters. | |
1515 | */ | |
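/*
 * Bitmap layout, for reference: cluster i is tracked by bit (i % 8) of byte
 * (i / 8), so a test reads
 *     expanded_clusters[i / 8] & (1 << (i % 8))
 * and marking a cluster sets that same bit.
 */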
1516 | static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table, | |
e390cf5a HR |
1517 | int l1_size, uint8_t **expanded_clusters, |
1518 | uint64_t *nb_clusters) | |
32b6444d HR |
1519 | { |
1520 | BDRVQcowState *s = bs->opaque; | |
1521 | bool is_active_l1 = (l1_table == s->l1_table); | |
1522 | uint64_t *l2_table = NULL; | |
1523 | int ret; | |
1524 | int i, j; | |
1525 | ||
1526 | if (!is_active_l1) { | |
1527 | /* inactive L2 tables are never kept in the L2 table cache, so a local | |
1528 | * buffer is needed to load them from disk */ | |
1529 | l2_table = qemu_blockalign(bs, s->cluster_size); | |
1530 | } | |
1531 | ||
1532 | for (i = 0; i < l1_size; i++) { | |
1533 | uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK; | |
1534 | bool l2_dirty = false; | |
1535 | ||
1536 | if (!l2_offset) { | |
1537 | /* unallocated */ | |
1538 | continue; | |
1539 | } | |
1540 | ||
1541 | if (is_active_l1) { | |
1542 | /* get active L2 tables from cache */ | |
1543 | ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, | |
1544 | (void **)&l2_table); | |
1545 | } else { | |
1546 | /* load inactive L2 tables from disk */ | |
1547 | ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE, | |
1548 | (void *)l2_table, s->cluster_sectors); | |
1549 | } | |
1550 | if (ret < 0) { | |
1551 | goto fail; | |
1552 | } | |
1553 | ||
1554 | for (j = 0; j < s->l2_size; j++) { | |
1555 | uint64_t l2_entry = be64_to_cpu(l2_table[j]); | |
1556 | int64_t offset = l2_entry & L2E_OFFSET_MASK, cluster_index; | |
1557 | int cluster_type = qcow2_get_cluster_type(l2_entry); | |
320c7066 | 1558 | bool preallocated = offset != 0; |
32b6444d HR |
1559 | |
1560 | if (cluster_type == QCOW2_CLUSTER_NORMAL) { | |
1561 | cluster_index = offset >> s->cluster_bits; | |
e390cf5a HR |
1562 | assert((cluster_index >= 0) && (cluster_index < *nb_clusters)); |
1563 | if ((*expanded_clusters)[cluster_index / 8] & | |
32b6444d HR |
1564 | (1 << (cluster_index % 8))) { |
1565 | /* Probably a shared L2 table; this cluster was a zero | |
1566 | * cluster which has been expanded, its refcount | |
1567 | * therefore most likely requires an update. */ | |
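/* (Shared L2 tables come from internal snapshots: a snapshot's L1 table
 * initially points at the same L2 tables as the active L1 table, so the
 * same cluster can be reached through several L1 entries and may be
 * visited here more than once.) */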
1568 | ret = qcow2_update_cluster_refcount(bs, cluster_index, 1, | |
1569 | QCOW2_DISCARD_NEVER); | |
1570 | if (ret < 0) { | |
1571 | goto fail; | |
1572 | } | |
1573 | /* Since we just increased the refcount, the cluster may no | |
1574 | * longer be exclusively referenced, so clear the COPIED flag */ | |
1575 | l2_table[j] = cpu_to_be64(l2_entry & ~QCOW_OFLAG_COPIED); | |
1576 | l2_dirty = true; | |
1577 | } | |
1578 | continue; | |
1579 | } | |
1580 | else if (qcow2_get_cluster_type(l2_entry) != QCOW2_CLUSTER_ZERO) { | |
1581 | continue; | |
1582 | } | |
1583 | ||
320c7066 | 1584 | if (!preallocated) { |
32b6444d HR |
1585 | if (!bs->backing_hd) { |
1586 | /* not backed; therefore we can simply deallocate the | |
1587 | * cluster */ | |
1588 | l2_table[j] = 0; | |
1589 | l2_dirty = true; | |
1590 | continue; | |
1591 | } | |
1592 | ||
1593 | offset = qcow2_alloc_clusters(bs, s->cluster_size); | |
1594 | if (offset < 0) { | |
1595 | ret = offset; | |
1596 | goto fail; | |
1597 | } | |
1598 | } | |
1599 | ||
1600 | ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT, | |
1601 | offset, s->cluster_size); | |
1602 | if (ret < 0) { | |
320c7066 HR |
1603 | if (!preallocated) { |
1604 | qcow2_free_clusters(bs, offset, s->cluster_size, | |
1605 | QCOW2_DISCARD_ALWAYS); | |
1606 | } | |
32b6444d HR |
1607 | goto fail; |
1608 | } | |
1609 | ||
1610 | ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE, | |
1611 | s->cluster_sectors); | |
1612 | if (ret < 0) { | |
320c7066 HR |
1613 | if (!preallocated) { |
1614 | qcow2_free_clusters(bs, offset, s->cluster_size, | |
1615 | QCOW2_DISCARD_ALWAYS); | |
1616 | } | |
32b6444d HR |
1617 | goto fail; |
1618 | } | |
1619 | ||
1620 | l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED); | |
1621 | l2_dirty = true; | |
1622 | ||
1623 | cluster_index = offset >> s->cluster_bits; | |
e390cf5a HR |
1624 | |
1625 | if (cluster_index >= *nb_clusters) { | |
1626 | uint64_t old_bitmap_size = (*nb_clusters + 7) / 8; | |
1627 | uint64_t new_bitmap_size; | |
1628 | /* The offset may lie beyond the old end of the underlying image | |
1629 | * file; this can only happen if the file is growable */ | |
1630 | assert(bs->file->growable); | |
1631 | *nb_clusters = size_to_clusters(s, bs->file->total_sectors * | |
1632 | BDRV_SECTOR_SIZE); | |
1633 | new_bitmap_size = (*nb_clusters + 7) / 8; | |
1634 | *expanded_clusters = g_realloc(*expanded_clusters, | |
1635 | new_bitmap_size); | |
1636 | /* clear the newly allocated space */ | |
1637 | memset(&(*expanded_clusters)[old_bitmap_size], 0, | |
1638 | new_bitmap_size - old_bitmap_size); | |
1639 | } | |
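/*
 * Quick arithmetic check of the reallocation above: if the image grows from
 * e.g. 1000 to 1200 clusters, the bitmap grows from (1000 + 7) / 8 = 125 to
 * (1200 + 7) / 8 = 150 bytes, and only the 25 new bytes are cleared.
 */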
1640 | ||
1641 | assert((cluster_index >= 0) && (cluster_index < *nb_clusters)); | |
1642 | (*expanded_clusters)[cluster_index / 8] |= 1 << (cluster_index % 8); | |
32b6444d HR |
1643 | } |
1644 | ||
1645 | if (is_active_l1) { | |
1646 | if (l2_dirty) { | |
1647 | qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table); | |
1648 | qcow2_cache_depends_on_flush(s->l2_table_cache); | |
1649 | } | |
1650 | ret = qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table); | |
1651 | if (ret < 0) { | |
1652 | l2_table = NULL; | |
1653 | goto fail; | |
1654 | } | |
1655 | } else { | |
1656 | if (l2_dirty) { | |
1657 | ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT & | |
1658 | ~(QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2), l2_offset, | |
1659 | s->cluster_size); | |
1660 | if (ret < 0) { | |
1661 | goto fail; | |
1662 | } | |
1663 | ||
1664 | ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE, | |
1665 | (void *)l2_table, s->cluster_sectors); | |
1666 | if (ret < 0) { | |
1667 | goto fail; | |
1668 | } | |
1669 | } | |
1670 | } | |
1671 | } | |
1672 | ||
1673 | ret = 0; | |
1674 | ||
1675 | fail: | |
1676 | if (l2_table) { | |
1677 | if (!is_active_l1) { | |
1678 | qemu_vfree(l2_table); | |
1679 | } else { | |
1680 | if (ret < 0) { | |
1681 | qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table); | |
1682 | } else { | |
1683 | ret = qcow2_cache_put(bs, s->l2_table_cache, | |
1684 | (void **)&l2_table); | |
1685 | } | |
1686 | } | |
1687 | } | |
1688 | return ret; | |
1689 | } | |
1690 | ||
1691 | /* | |
1692 | * For backed images, expands all zero clusters on the image. For non-backed | |
1693 | * images, deallocates all non-pre-allocated zero clusters (and claims the | |
1694 | * allocation for pre-allocated ones). This is important for downgrading to a | |
1695 | * qcow2 version which doesn't yet support metadata zero clusters. | |
1696 | */ | |
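/*
 * Typically reached from the image downgrade path (e.g. "qemu-img amend -o
 * compat=0.10"): a version 2 image cannot represent zero clusters, so they
 * have to be expanded or dropped before the version field is lowered.
 */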
1697 | int qcow2_expand_zero_clusters(BlockDriverState *bs) | |
1698 | { | |
1699 | BDRVQcowState *s = bs->opaque; | |
1700 | uint64_t *l1_table = NULL; | |
32b6444d HR |
1701 | uint64_t nb_clusters; |
1702 | uint8_t *expanded_clusters; | |
1703 | int ret; | |
1704 | int i, j; | |
1705 | ||
e390cf5a HR |
1706 | nb_clusters = size_to_clusters(s, bs->file->total_sectors * |
1707 | BDRV_SECTOR_SIZE); | |
32b6444d HR |
1708 | expanded_clusters = g_malloc0((nb_clusters + 7) / 8); |
1709 | ||
1710 | ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size, | |
e390cf5a | 1711 | &expanded_clusters, &nb_clusters); |
32b6444d HR |
1712 | if (ret < 0) { |
1713 | goto fail; | |
1714 | } | |
1715 | ||
1716 | /* Inactive L1 tables may point to active L2 tables - therefore it is | |
1717 | * necessary to flush the L2 table cache before trying to access the L2 | |
1718 | * tables pointed to by inactive L1 entries (else we might try to expand | |
1719 | * zero clusters that have already been expanded); furthermore, it is also | |
1720 | * necessary to empty the L2 table cache, since it may contain tables which | |
1721 | * are now going to be modified directly on disk, bypassing the cache. | |
1722 | * qcow2_cache_empty() does both for us. */ | |
1723 | ret = qcow2_cache_empty(bs, s->l2_table_cache); | |
1724 | if (ret < 0) { | |
1725 | goto fail; | |
1726 | } | |
1727 | ||
1728 | for (i = 0; i < s->nb_snapshots; i++) { | |
1729 | int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) + | |
1730 | BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE; | |
1731 | ||
1732 | l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE); | |
1733 | ||
1734 | ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset / | |
1735 | BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors); | |
1736 | if (ret < 0) { | |
1737 | goto fail; | |
1738 | } | |
1739 | ||
1740 | for (j = 0; j < s->snapshots[i].l1_size; j++) { | |
1741 | be64_to_cpus(&l1_table[j]); | |
1742 | } | |
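/* On-disk L1 entries are big-endian; converting them to host byte order
 * here lets expand_zero_clusters_in_l1() treat snapshot L1 tables exactly
 * like the (already byte-swapped) active L1 table. */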
1743 | ||
1744 | ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size, | |
e390cf5a | 1745 | &expanded_clusters, &nb_clusters); |
32b6444d HR |
1746 | if (ret < 0) { |
1747 | goto fail; | |
1748 | } | |
1749 | } | |
1750 | ||
1751 | ret = 0; | |
1752 | ||
1753 | fail: | |
1754 | g_free(expanded_clusters); | |
1755 | g_free(l1_table); | |
1756 | return ret; | |
1757 | } |