/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi <[email protected]>
 *  Anthony Liguori <[email protected]>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"

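/**
 * Cancel an in-flight request by waiting for it to complete
 *
 * Note: this synchronously polls the AioContext until the request finishes,
 * so "cancel" here means wait for completion rather than abort.
 */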
static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    AioContext *aio_context = bdrv_get_aio_context(blockacb->bs);
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        aio_poll(aio_context, true);
    }
}

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

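/**
 * Format probe function: returns a confidence score for the given buffer.
 * 100 means the QED magic matched, 0 means this is not a QED image.
 */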
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

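/**
 * Write the header out synchronously
 *
 * Returns 0 on success; on failure the bdrv_pwrite() result is returned
 * unchanged.
 */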
int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

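/**
 * Maximum image size for a given cluster size and table size
 *
 * Each L1 entry covers one L2 table and each L2 entry covers one cluster, so
 * the limit works out to table_entries^2 * cluster_size bytes.
 */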
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

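/**
 * The need-check timer fires once the image has been idle for a while and
 * clears the QED_F_NEED_CHECK flag: allocating writes are plugged, data is
 * flushed to disk, the header is rewritten, and writes are unplugged again.
 */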
static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_rebind(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    s->bs = bs;
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

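/**
 * Open a QED image: validate the header, read the backing filename, reset
 * unknown autoclear feature bits, load the L1 table, and consistency-check
 * the image if it was not closed cleanly.
 */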
static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                  bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static int bdrv_qed_refresh_limits(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;

    return 0;
}

/* We have nothing to do for QED reopen, stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

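/**
 * Create a new QED image file: write the header, the backing filename
 * string, and a zeroed L1 table.  The parameters have already been
 * validated by the caller.
 */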
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockDriverState *bs;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    bs = NULL;
    ret = bdrv_open(&bs, filename, NULL, NULL,
                    BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_PROTOCOL, NULL,
                    &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_unref(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

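/* Context shared between bdrv_qed_co_get_block_status() and its callback */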
typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

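/**
 * Look up the allocation status of a sector range.  qed_find_cluster() may
 * complete either immediately or asynchronously, so the coroutine yields
 * until the callback has filled in cb.status.
 */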
static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

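/**
 * Invoke the completion callback from a bottom half so it never runs in the
 * caller's stack frame, then signal completion to any qed_aio_cancel() waiter.
 */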
static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
                         qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

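/**
 * Continuation for zero writes: once the header update (if any) is done,
 * link the special zero cluster marker (offset 1) into the L2 table.
 */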
static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

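/**
 * Allocate and start a new read or write request
 *
 * @flags: 0 for read, QED_AIOCB_WRITE for write, optionally combined with
 *         QED_AIOCB_ZERO for zero writes
 */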
static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors,
                                                 BdrvRequestFlags flags)
{
    BlockDriverAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing_hd) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_invalidate_cache(bs->file, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_setg(errp, "Could not reopen qed layer: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_rebind              = bdrv_qed_rebind,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);