2 * QEMU Enhanced Disk Format
4 * Copyright IBM, Corp. 2010
10 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
11 * See the COPYING.LIB file in the top-level directory.
15 #include "qemu/osdep.h"
16 #include "qemu/timer.h"
19 #include "qapi/qmp/qerror.h"
20 #include "migration/migration.h"
22 static const AIOCBInfo qed_aiocb_info = {
23 .aiocb_size = sizeof(QEDAIOCB),
26 static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
29 const QEDHeader *header = (const QEDHeader *)buf;
31 if (buf_size < sizeof(*header)) {
34 if (le32_to_cpu(header->magic) != QED_MAGIC) {
41 * Check whether an image format is raw
43 * @fmt: Backing file format, may be NULL
45 static bool qed_fmt_is_raw(const char *fmt)
47 return fmt && strcmp(fmt, "raw") == 0;
50 static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
52 cpu->magic = le32_to_cpu(le->magic);
53 cpu->cluster_size = le32_to_cpu(le->cluster_size);
54 cpu->table_size = le32_to_cpu(le->table_size);
55 cpu->header_size = le32_to_cpu(le->header_size);
56 cpu->features = le64_to_cpu(le->features);
57 cpu->compat_features = le64_to_cpu(le->compat_features);
58 cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
59 cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
60 cpu->image_size = le64_to_cpu(le->image_size);
61 cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
62 cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
65 static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
67 le->magic = cpu_to_le32(cpu->magic);
68 le->cluster_size = cpu_to_le32(cpu->cluster_size);
69 le->table_size = cpu_to_le32(cpu->table_size);
70 le->header_size = cpu_to_le32(cpu->header_size);
71 le->features = cpu_to_le64(cpu->features);
72 le->compat_features = cpu_to_le64(cpu->compat_features);
73 le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
74 le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
75 le->image_size = cpu_to_le64(cpu->image_size);
76 le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
77 le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
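/* For reference: every header field is stored little-endian on disk, so the
 * conversions above are byte-swaps on big-endian hosts and no-ops on
 * little-endian ones. The magic bytes 'Q' 'E' 'D' '\0' at the start of the
 * file read back as QED_MAGIC (0x00444551) once converted.
 */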
80 int qed_write_header_sync(BDRVQEDState *s)
85 qed_header_cpu_to_le(&s->header, &le);
86 ret = bdrv_pwrite(s->bs->file->bs, 0, &le, sizeof(le));
87 if (ret != sizeof(le)) {
102 static void qed_write_header_cb(void *opaque, int ret)
104 QEDWriteHeaderCB *write_header_cb = opaque;
106 qemu_vfree(write_header_cb->buf);
107 gencb_complete(write_header_cb, ret);
110 static void qed_write_header_read_cb(void *opaque, int ret)
112 QEDWriteHeaderCB *write_header_cb = opaque;
113 BDRVQEDState *s = write_header_cb->s;
116 qed_write_header_cb(write_header_cb, ret);
121 qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);
123 bdrv_aio_writev(s->bs->file->bs, 0, &write_header_cb->qiov,
124 write_header_cb->nsectors, qed_write_header_cb,
129 * Update header in-place (does not rewrite backing filename or other strings)
131 * This function only updates known header fields in-place and does not affect
132 * extra data after the QED header.
134 static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
137 /* We must write full sectors for O_DIRECT but cannot necessarily generate
138 * the data following the header if an unrecognized compat feature is
139 * active. Therefore, first read the sectors containing the header, update
140 * them, and write back.
143 int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
145 size_t len = nsectors * BDRV_SECTOR_SIZE;
146 QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
149 write_header_cb->s = s;
150 write_header_cb->nsectors = nsectors;
151 write_header_cb->buf = qemu_blockalign(s->bs, len);
152 write_header_cb->iov.iov_base = write_header_cb->buf;
153 write_header_cb->iov.iov_len = len;
154 qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);
156 bdrv_aio_readv(s->bs->file->bs, 0, &write_header_cb->qiov, nsectors,
157 qed_write_header_read_cb, write_header_cb);
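/* A rough sizing note: QEDHeader is a small struct, well below one 512-byte
 * sector, so nsectors above normally works out to 1 and len to 512 bytes;
 * the sector rounding only matters if the header ever grows past a sector.
 */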
160 static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
162 uint64_t table_entries;
165 table_entries = (table_size * cluster_size) / sizeof(uint64_t);
166 l2_size = table_entries * cluster_size;
168 return l2_size * table_entries;
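/* A worked example, assuming the defaults used elsewhere in this file
 * (cluster_size = 65536, table_size = 4):
 *
 *   table_entries = (4 * 65536) / sizeof(uint64_t) = 32768
 *   l2_size       = 32768 * 65536                  = 2 GiB
 *   maximum image = 2 GiB * 32768                  = 64 TiB
 */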
171 static bool qed_is_cluster_size_valid(uint32_t cluster_size)
173 if (cluster_size < QED_MIN_CLUSTER_SIZE ||
174 cluster_size > QED_MAX_CLUSTER_SIZE) {
177 if (cluster_size & (cluster_size - 1)) {
178 return false; /* not power of 2 */
183 static bool qed_is_table_size_valid(uint32_t table_size)
185 if (table_size < QED_MIN_TABLE_SIZE ||
186 table_size > QED_MAX_TABLE_SIZE) {
189 if (table_size & (table_size - 1)) {
190 return false; /* not power of 2 */
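/* Both validators accept only powers of two inside the limits defined in
 * qed.h (QED_MIN_CLUSTER_SIZE through QED_MAX_CLUSTER_SIZE and
 * QED_MIN_TABLE_SIZE through QED_MAX_TABLE_SIZE). For example, the default
 * 65536-byte cluster and 4-cluster table pass, while a 48 KiB cluster or a
 * 3-cluster table would be rejected for not being powers of two.
 */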
195 static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
198 if (image_size % BDRV_SECTOR_SIZE != 0) {
199 return false; /* not multiple of sector size */
201 if (image_size > qed_max_image_size(cluster_size, table_size)) {
202 return false; /* image is too large */
208 * Read a string of known length from the image file
211 * @offset: File offset to start of string, in bytes
212 * @n: String length in bytes
213 * @buf: Destination buffer
214 * @buflen: Destination buffer length in bytes
215 * @ret: 0 on success, -errno on failure
217 * The string is NUL-terminated.
219 static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
220 char *buf, size_t buflen)
226 ret = bdrv_pread(file, offset, buf, n);
235 * Allocate new clusters
238 * @n: Number of contiguous clusters to allocate
239 * @ret: Offset of first allocated cluster
241 * This function only produces the offset where the new clusters should be
242 * written. It updates BDRVQEDState but does not make any changes to the image file.
245 static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
247 uint64_t offset = s->file_size;
248 s->file_size += n * s->header.cluster_size;
252 QEDTable *qed_alloc_table(BDRVQEDState *s)
254 /* Honor O_DIRECT memory alignment requirements */
255 return qemu_blockalign(s->bs,
256 s->header.cluster_size * s->header.table_size);
260 * Allocate a new zeroed L2 table
262 static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
264 CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
266 l2_table->table = qed_alloc_table(s);
267 l2_table->offset = qed_alloc_clusters(s, s->header.table_size);
269 memset(l2_table->table->offsets, 0,
270 s->header.cluster_size * s->header.table_size);
274 static void qed_aio_next_io(void *opaque, int ret);
276 static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
278 assert(!s->allocating_write_reqs_plugged);
280 s->allocating_write_reqs_plugged = true;
283 static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
287 assert(s->allocating_write_reqs_plugged);
289 s->allocating_write_reqs_plugged = false;
291 acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
293 qed_aio_next_io(acb, 0);
297 static void qed_finish_clear_need_check(void *opaque, int ret)
302 static void qed_flush_after_clear_need_check(void *opaque, int ret)
304 BDRVQEDState *s = opaque;
306 bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);
308 /* No need to wait until flush completes */
309 qed_unplug_allocating_write_reqs(s);
312 static void qed_clear_need_check(void *opaque, int ret)
314 BDRVQEDState *s = opaque;
317 qed_unplug_allocating_write_reqs(s);
321 s->header.features &= ~QED_F_NEED_CHECK;
322 qed_write_header(s, qed_flush_after_clear_need_check, s);
325 static void qed_need_check_timer_cb(void *opaque)
327 BDRVQEDState *s = opaque;
329 /* The timer should only fire when allocating writes have drained */
330 assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));
332 trace_qed_need_check_timer_cb(s);
334 qed_plug_allocating_write_reqs(s);
336 /* Ensure writes are on disk before clearing flag */
337 bdrv_aio_flush(s->bs, qed_clear_need_check, s);
340 static void qed_start_need_check_timer(BDRVQEDState *s)
342 trace_qed_start_need_check_timer(s);
344 /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while the guest is suspended, e.g. for migration */
347 timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
348 get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
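/* The delay is QED_NEED_CHECK_TIMEOUT seconds (get_ticks_per_sec() returns
 * ticks per second). Once the image has been idle for that long, the timer
 * callback above flushes the file and clears QED_F_NEED_CHECK so that a
 * later clean open does not need a consistency check.
 */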
351 /* It's okay to call this multiple times or when no timer is started */
352 static void qed_cancel_need_check_timer(BDRVQEDState *s)
354 trace_qed_cancel_need_check_timer(s);
355 timer_del(s->need_check_timer);
358 static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
360 BDRVQEDState *s = bs->opaque;
362 qed_cancel_need_check_timer(s);
363 timer_free(s->need_check_timer);
366 static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
367 AioContext *new_context)
369 BDRVQEDState *s = bs->opaque;
371 s->need_check_timer = aio_timer_new(new_context,
372 QEMU_CLOCK_VIRTUAL, SCALE_NS,
373 qed_need_check_timer_cb, s);
374 if (s->header.features & QED_F_NEED_CHECK) {
375 qed_start_need_check_timer(s);
379 static void bdrv_qed_drain(BlockDriverState *bs)
381 BDRVQEDState *s = bs->opaque;
383 /* Cancel the timer and start the I/O it would have triggered, so that
384  * bdrv_drain() takes care of the ongoing requests */
386 qed_cancel_need_check_timer(s);
387 qed_plug_allocating_write_reqs(s);
388 bdrv_aio_flush(s->bs, qed_clear_need_check, s);
391 static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
394 BDRVQEDState *s = bs->opaque;
400 QSIMPLEQ_INIT(&s->allocating_write_reqs);
402 ret = bdrv_pread(bs->file->bs, 0, &le_header, sizeof(le_header));
406 qed_header_le_to_cpu(&le_header, &s->header);
408 if (s->header.magic != QED_MAGIC) {
409 error_setg(errp, "Image not in QED format");
412 if (s->header.features & ~QED_FEATURE_MASK) {
413 /* image uses unsupported feature bits */
415 snprintf(buf, sizeof(buf), "%" PRIx64,
416 s->header.features & ~QED_FEATURE_MASK);
417 error_setg(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
418 bdrv_get_device_or_node_name(bs), "QED", buf);
421 if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
425 /* Round down file size to the last cluster */
426 file_size = bdrv_getlength(bs->file->bs);
430 s->file_size = qed_start_of_cluster(s, file_size);
432 if (!qed_is_table_size_valid(s->header.table_size)) {
435 if (!qed_is_image_size_valid(s->header.image_size,
436 s->header.cluster_size,
437 s->header.table_size)) {
440 if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
444 s->table_nelems = (s->header.cluster_size * s->header.table_size) /
446 s->l2_shift = ctz32(s->header.cluster_size);
447 s->l2_mask = s->table_nelems - 1;
448 s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
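/* With the default 65536-byte clusters and 4-cluster tables this gives
 * table_nelems = 32768, l2_shift = 16, l2_mask = 0x7fff and l1_shift = 31,
 * i.e. each L2 table maps 2 GiB of guest-visible disk and a position's L1
 * index is simply pos >> 31.
 */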
450 /* Header size calculation must not overflow uint32_t */
451 if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
455 if ((s->header.features & QED_F_BACKING_FILE)) {
456 if ((uint64_t)s->header.backing_filename_offset +
457 s->header.backing_filename_size >
458 s->header.cluster_size * s->header.header_size) {
462 ret = qed_read_string(bs->file->bs, s->header.backing_filename_offset,
463 s->header.backing_filename_size, bs->backing_file,
464 sizeof(bs->backing_file));
469 if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
470 pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
474 /* Reset unknown autoclear feature bits. This is a backwards
475 * compatibility mechanism that allows images to be opened by older
476 * programs, which "knock out" unknown feature bits. When an image is
477 * opened by a newer program again it can detect that the autoclear
478 * feature is no longer valid.
480 if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
481 !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
482 s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;
484 ret = qed_write_header_sync(s);
489 /* From here on only known autoclear feature bits are valid */
490 bdrv_flush(bs->file->bs);
493 s->l1_table = qed_alloc_table(s);
494 qed_init_l2_cache(&s->l2_cache);
496 ret = qed_read_l1_table_sync(s);
501 /* If image was not closed cleanly, check consistency */
502 if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
503 /* Read-only images cannot be fixed. There is no risk of corruption
504 * since write operations are not possible. Therefore, allow
505 * potentially inconsistent images to be opened read-only. This can
506 * aid data recovery from an otherwise inconsistent image.
508 if (!bdrv_is_read_only(bs->file->bs) &&
509 !(flags & BDRV_O_INACTIVE)) {
510 BdrvCheckResult result = {0};
512 ret = qed_check(s, &result, true);
519 bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));
523 qed_free_l2_cache(&s->l2_cache);
524 qemu_vfree(s->l1_table);
529 static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
531 BDRVQEDState *s = bs->opaque;
533 bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;
536 /* We have nothing to do for QED reopen, stubs just return success */
538 static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
539 BlockReopenQueue *queue, Error **errp)
544 static void bdrv_qed_close(BlockDriverState *bs)
546 BDRVQEDState *s = bs->opaque;
548 bdrv_qed_detach_aio_context(bs);
550 /* Ensure writes reach stable storage */
551 bdrv_flush(bs->file->bs);
553 /* Clean shutdown, no check required on next open */
554 if (s->header.features & QED_F_NEED_CHECK) {
555 s->header.features &= ~QED_F_NEED_CHECK;
556 qed_write_header_sync(s);
559 qed_free_l2_cache(&s->l2_cache);
560 qemu_vfree(s->l1_table);
563 static int qed_create(const char *filename, uint32_t cluster_size,
564 uint64_t image_size, uint32_t table_size,
565 const char *backing_file, const char *backing_fmt,
566 QemuOpts *opts, Error **errp)
570 .cluster_size = cluster_size,
571 .table_size = table_size,
574 .compat_features = 0,
575 .l1_table_offset = cluster_size,
576 .image_size = image_size,
579 uint8_t *l1_table = NULL;
580 size_t l1_size = header.cluster_size * header.table_size;
581 Error *local_err = NULL;
583 BlockDriverState *bs;
585 ret = bdrv_create_file(filename, opts, &local_err);
587 error_propagate(errp, local_err);
592 ret = bdrv_open(&bs, filename, NULL, NULL,
593 BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_PROTOCOL,
596 error_propagate(errp, local_err);
600 /* File must start empty and grow, check truncate is supported */
601 ret = bdrv_truncate(bs, 0);
607 header.features |= QED_F_BACKING_FILE;
608 header.backing_filename_offset = sizeof(le_header);
609 header.backing_filename_size = strlen(backing_file);
611 if (qed_fmt_is_raw(backing_fmt)) {
612 header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
616 qed_header_cpu_to_le(&header, &le_header);
617 ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
621 ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
622 header.backing_filename_size);
627 l1_table = g_malloc0(l1_size);
628 ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
633 ret = 0; /* success */
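/* Sketch of the resulting image: cluster 0 holds the little-endian header,
 * immediately followed by the backing filename string if one was given; a
 * zeroed L1 table spans table_size clusters starting at l1_table_offset
 * (== cluster_size). L2 tables and data clusters are only allocated later,
 * as the image gets written.
 */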
640 static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
642 uint64_t image_size = 0;
643 uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
644 uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
645 char *backing_file = NULL;
646 char *backing_fmt = NULL;
649 image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
651 backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
652 backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
653 cluster_size = qemu_opt_get_size_del(opts,
654 BLOCK_OPT_CLUSTER_SIZE,
655 QED_DEFAULT_CLUSTER_SIZE);
656 table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
657 QED_DEFAULT_TABLE_SIZE);
659 if (!qed_is_cluster_size_valid(cluster_size)) {
660 error_setg(errp, "QED cluster size must be within range [%u, %u] "
662 QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
666 if (!qed_is_table_size_valid(table_size)) {
667 error_setg(errp, "QED table size must be within range [%u, %u] "
669 QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
673 if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
674 error_setg(errp, "QED image size must be a non-zero multiple of "
675 "cluster size and less than %" PRIu64 " bytes",
676 qed_max_image_size(cluster_size, table_size));
681 ret = qed_create(filename, cluster_size, image_size, table_size,
682 backing_file, backing_fmt, opts, errp);
685 g_free(backing_file);
691 BlockDriverState *bs;
698 static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
700 QEDIsAllocatedCB *cb = opaque;
701 BDRVQEDState *s = cb->bs->opaque;
702 *cb->pnum = len / BDRV_SECTOR_SIZE;
704 case QED_CLUSTER_FOUND:
705 offset |= qed_offset_into_cluster(s, cb->pos);
706 cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
708 case QED_CLUSTER_ZERO:
709 cb->status = BDRV_BLOCK_ZERO;
722 qemu_coroutine_enter(cb->co, NULL);
726 static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
728 int nb_sectors, int *pnum,
729 BlockDriverState **file)
731 BDRVQEDState *s = bs->opaque;
732 size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
733 QEDIsAllocatedCB cb = {
735 .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
736 .status = BDRV_BLOCK_OFFSET_MASK,
739 QEDRequest request = { .l2_table = NULL };
741 qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);
743 /* Now sleep if the callback wasn't invoked immediately */
744 while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
745 cb.co = qemu_coroutine_self();
746 qemu_coroutine_yield();
749 qed_unref_l2_cache_entry(request.l2_table);
754 static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
756 return acb->common.bs->opaque;
760 * Read from the backing file or zero-fill if no backing file
763 * @pos: Byte position in device
764 * @qiov: Destination I/O vector
765 * @backing_qiov: Possibly shortened copy of qiov, to be allocated here
766 * @cb: Completion function
767 * @opaque: User data for completion function
769 * This function reads qiov->size bytes starting at pos from the backing file.
770 * If there is no backing file then zeroes are read.
772 static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
774 QEMUIOVector **backing_qiov,
775 BlockCompletionFunc *cb, void *opaque)
777 uint64_t backing_length = 0;
780 /* If there is a backing file, get its length. Treat the absence of a
781 * backing file like a zero length backing file.
783 if (s->bs->backing) {
784 int64_t l = bdrv_getlength(s->bs->backing->bs);
792 /* Zero all sectors if reading beyond the end of the backing file */
793 if (pos >= backing_length ||
794 pos + qiov->size > backing_length) {
795 qemu_iovec_memset(qiov, 0, 0, qiov->size);
798 /* Complete now if there are no backing file sectors to read */
799 if (pos >= backing_length) {
804 /* If the read straddles the end of the backing file, shorten it */
805 size = MIN((uint64_t)backing_length - pos, qiov->size);
807 assert(*backing_qiov == NULL);
808 *backing_qiov = g_new(QEMUIOVector, 1);
809 qemu_iovec_init(*backing_qiov, qiov->niov);
810 qemu_iovec_concat(*backing_qiov, qiov, 0, size);
812 BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
813 bdrv_aio_readv(s->bs->backing->bs, pos / BDRV_SECTOR_SIZE,
814 *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
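/* Example of the straddling case: with a 1 MiB backing file, a 64 KiB read
 * starting 4 KiB before its end first zero-fills the whole qiov, then reads
 * just the first 4 KiB from the backing file through the shortened
 * backing_qiov; the remaining 60 KiB stays zeroed, as it would if read
 * through the backing chain.
 */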
821 QEMUIOVector *backing_qiov;
824 } CopyFromBackingFileCB;
826 static void qed_copy_from_backing_file_cb(void *opaque, int ret)
828 CopyFromBackingFileCB *copy_cb = opaque;
829 qemu_vfree(copy_cb->iov.iov_base);
830 gencb_complete(&copy_cb->gencb, ret);
833 static void qed_copy_from_backing_file_write(void *opaque, int ret)
835 CopyFromBackingFileCB *copy_cb = opaque;
836 BDRVQEDState *s = copy_cb->s;
838 if (copy_cb->backing_qiov) {
839 qemu_iovec_destroy(copy_cb->backing_qiov);
840 g_free(copy_cb->backing_qiov);
841 copy_cb->backing_qiov = NULL;
845 qed_copy_from_backing_file_cb(copy_cb, ret);
849 BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
850 bdrv_aio_writev(s->bs->file->bs, copy_cb->offset / BDRV_SECTOR_SIZE,
851 &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
852 qed_copy_from_backing_file_cb, copy_cb);
856 * Copy data from backing file into the image
859 * @pos: Byte position in device
860 * @len: Number of bytes
861 * @offset: Byte offset in image file
862 * @cb: Completion function
863 * @opaque: User data for completion function
865 static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
866 uint64_t len, uint64_t offset,
867 BlockCompletionFunc *cb,
870 CopyFromBackingFileCB *copy_cb;
872 /* Skip copy entirely if there is no work to do */
878 copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
880 copy_cb->offset = offset;
881 copy_cb->backing_qiov = NULL;
882 copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
883 copy_cb->iov.iov_len = len;
884 qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);
886 qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
887 qed_copy_from_backing_file_write, copy_cb);
891 * Link one or more contiguous clusters into a table
895 * @index: First cluster index
896 * @n: Number of contiguous clusters
897 * @cluster: First cluster offset
899 * The cluster offset may be an allocated byte offset in the image file, the
900 * zero cluster marker, or the unallocated cluster marker.
902 static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
903 unsigned int n, uint64_t cluster)
906 for (i = index; i < index + n; i++) {
907 table->offsets[i] = cluster;
908 if (!qed_offset_is_unalloc_cluster(cluster) &&
909 !qed_offset_is_zero_cluster(cluster)) {
910 cluster += s->header.cluster_size;
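/* For example, linking three newly allocated clusters starting at byte offset
 * X stores X, X + cluster_size and X + 2 * cluster_size in consecutive
 * entries, while the zero and unallocated markers (see
 * qed_offset_is_zero_cluster() and qed_offset_is_unalloc_cluster()) are
 * repeated unchanged for every entry.
 */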
915 static void qed_aio_complete_bh(void *opaque)
917 QEDAIOCB *acb = opaque;
918 BlockCompletionFunc *cb = acb->common.cb;
919 void *user_opaque = acb->common.opaque;
920 int ret = acb->bh_ret;
922 qemu_bh_delete(acb->bh);
925 /* Invoke callback */
926 cb(user_opaque, ret);
929 static void qed_aio_complete(QEDAIOCB *acb, int ret)
931 BDRVQEDState *s = acb_to_s(acb);
933 trace_qed_aio_complete(s, acb, ret);
936 qemu_iovec_destroy(&acb->cur_qiov);
937 qed_unref_l2_cache_entry(acb->request.l2_table);
939 /* Free the buffer we may have allocated for zero writes */
940 if (acb->flags & QED_AIOCB_ZERO) {
941 qemu_vfree(acb->qiov->iov[0].iov_base);
942 acb->qiov->iov[0].iov_base = NULL;
945 /* Arrange for a bh to invoke the completion function */
947 acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
948 qed_aio_complete_bh, acb);
949 qemu_bh_schedule(acb->bh);
951 /* Start next allocating write request waiting behind this one. Note that
952 * requests enqueue themselves when they first hit an unallocated cluster
953 * but they wait until the entire request is finished before waking up the
954 * next request in the queue. This ensures that we don't cycle through
955 * requests multiple times but rather finish one at a time completely.
957 if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
958 QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
959 acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
961 qed_aio_next_io(acb, 0);
962 } else if (s->header.features & QED_F_NEED_CHECK) {
963 qed_start_need_check_timer(s);
969 * Commit the current L2 table to the cache
971 static void qed_commit_l2_update(void *opaque, int ret)
973 QEDAIOCB *acb = opaque;
974 BDRVQEDState *s = acb_to_s(acb);
975 CachedL2Table *l2_table = acb->request.l2_table;
976 uint64_t l2_offset = l2_table->offset;
978 qed_commit_l2_cache_entry(&s->l2_cache, l2_table);
980 /* This is guaranteed to succeed because we just committed the entry to the cache */
983 acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
984 assert(acb->request.l2_table != NULL);
986 qed_aio_next_io(opaque, ret);
990 * Update L1 table with new L2 table offset and write it out
992 static void qed_aio_write_l1_update(void *opaque, int ret)
994 QEDAIOCB *acb = opaque;
995 BDRVQEDState *s = acb_to_s(acb);
999 qed_aio_complete(acb, ret);
1003 index = qed_l1_index(s, acb->cur_pos);
1004 s->l1_table->offsets[index] = acb->request.l2_table->offset;
1006 qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
1010 * Update L2 table with new cluster offsets and write them out
1012 static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
1014 BDRVQEDState *s = acb_to_s(acb);
1015 bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
1023 qed_unref_l2_cache_entry(acb->request.l2_table);
1024 acb->request.l2_table = qed_new_l2_table(s);
1027 index = qed_l2_index(s, acb->cur_pos);
1028 qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
1032 /* Write out the whole new L2 table */
1033 qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
1034 qed_aio_write_l1_update, acb);
1036 /* Write out only the updated part of the L2 table */
1037 qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
1038 qed_aio_next_io, acb);
1043 qed_aio_complete(acb, ret);
1046 static void qed_aio_write_l2_update_cb(void *opaque, int ret)
1048 QEDAIOCB *acb = opaque;
1049 qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
1053 * Flush new data clusters before updating the L2 table
1055 * This flush is necessary when a backing file is in use. A crash during an
1056 * allocating write could result in empty clusters in the image. If the write
1057 * only touched a subregion of the cluster, then backing image sectors have
1058 * been lost in the untouched region. The solution is to flush after writing a
1059 * new data cluster and before updating the L2 table.
1061 static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
1063 QEDAIOCB *acb = opaque;
1064 BDRVQEDState *s = acb_to_s(acb);
1066 if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
1067 qed_aio_complete(acb, -EIO);
1072 * Write data to the image file
1074 static void qed_aio_write_main(void *opaque, int ret)
1076 QEDAIOCB *acb = opaque;
1077 BDRVQEDState *s = acb_to_s(acb);
1078 uint64_t offset = acb->cur_cluster +
1079 qed_offset_into_cluster(s, acb->cur_pos);
1080 BlockCompletionFunc *next_fn;
1082 trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);
1085 qed_aio_complete(acb, ret);
1089 if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
1090 next_fn = qed_aio_next_io;
1092 if (s->bs->backing) {
1093 next_fn = qed_aio_write_flush_before_l2_update;
1095 next_fn = qed_aio_write_l2_update_cb;
1099 BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
1100 bdrv_aio_writev(s->bs->file->bs, offset / BDRV_SECTOR_SIZE,
1101 &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
1106 * Populate the untouched region at the back of a new data cluster
1108 static void qed_aio_write_postfill(void *opaque, int ret)
1110 QEDAIOCB *acb = opaque;
1111 BDRVQEDState *s = acb_to_s(acb);
1112 uint64_t start = acb->cur_pos + acb->cur_qiov.size;
1114 qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
1115 uint64_t offset = acb->cur_cluster +
1116 qed_offset_into_cluster(s, acb->cur_pos) +
1120 qed_aio_complete(acb, ret);
1124 trace_qed_aio_write_postfill(s, acb, start, len, offset);
1125 qed_copy_from_backing_file(s, start, len, offset,
1126 qed_aio_write_main, acb);
1130 * Populate the untouched region at the front of a new data cluster
1132 static void qed_aio_write_prefill(void *opaque, int ret)
1134 QEDAIOCB *acb = opaque;
1135 BDRVQEDState *s = acb_to_s(acb);
1136 uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
1137 uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);
1139 trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
1140 qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
1141 qed_aio_write_postfill, acb);
1145 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
1147 static bool qed_should_set_need_check(BDRVQEDState *s)
1149 /* The flush before L2 update path ensures consistency */
1150 if (s->bs->backing) {
1154 return !(s->header.features & QED_F_NEED_CHECK);
1157 static void qed_aio_write_zero_cluster(void *opaque, int ret)
1159 QEDAIOCB *acb = opaque;
1162 qed_aio_complete(acb, ret);
1166 qed_aio_write_l2_update(acb, 0, 1);
1170 * Write new data cluster
1172 * @acb: Write request
1173 * @len: Length in bytes
1175 * This path is taken when writing to previously unallocated clusters.
1177 static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
1179 BDRVQEDState *s = acb_to_s(acb);
1180 BlockCompletionFunc *cb;
1182 /* Cancel timer when the first allocating request comes in */
1183 if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
1184 qed_cancel_need_check_timer(s);
1187 /* Freeze this request if another allocating write is in progress */
1188 if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
1189 QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
1191 if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
1192 s->allocating_write_reqs_plugged) {
1193 return; /* wait for existing request to finish */
1196 acb->cur_nclusters = qed_bytes_to_clusters(s,
1197 qed_offset_into_cluster(s, acb->cur_pos) + len);
1198 qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
1200 if (acb->flags & QED_AIOCB_ZERO) {
1201 /* Skip ahead if the clusters are already zero */
1202 if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
1203 qed_aio_next_io(acb, 0);
1207 cb = qed_aio_write_zero_cluster;
1209 cb = qed_aio_write_prefill;
1210 acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
1213 if (qed_should_set_need_check(s)) {
1214 s->header.features |= QED_F_NEED_CHECK;
1215 qed_write_header(s, cb, acb);
1222 * Write data cluster in place
1224 * @acb: Write request
1225 * @offset: Cluster offset in bytes
1226 * @len: Length in bytes
1228 * This path is taken when writing to already allocated clusters.
1230 static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
1232 /* Allocate buffer for zero writes */
1233 if (acb->flags & QED_AIOCB_ZERO) {
1234 struct iovec *iov = acb->qiov->iov;
1236 if (!iov->iov_base) {
1237 iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
1238 if (iov->iov_base == NULL) {
1239 qed_aio_complete(acb, -ENOMEM);
1242 memset(iov->iov_base, 0, iov->iov_len);
1246 /* Calculate the I/O vector */
1247 acb->cur_cluster = offset;
1248 qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
1250 /* Do the actual write */
1251 qed_aio_write_main(acb, 0);
1255 * Write data cluster
1257 * @opaque: Write request
1258 * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
1260 * @offset: Cluster offset in bytes
1261 * @len: Length in bytes
1263 * Callback from qed_find_cluster().
1265 static void qed_aio_write_data(void *opaque, int ret,
1266 uint64_t offset, size_t len)
1268 QEDAIOCB *acb = opaque;
1270 trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);
1272 acb->find_cluster_ret = ret;
1275 case QED_CLUSTER_FOUND:
1276 qed_aio_write_inplace(acb, offset, len);
1279 case QED_CLUSTER_L2:
1280 case QED_CLUSTER_L1:
1281 case QED_CLUSTER_ZERO:
1282 qed_aio_write_alloc(acb, len);
1286 qed_aio_complete(acb, ret);
1294 * @opaque: Read request
1295 * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
1297 * @offset: Cluster offset in bytes
1298 * @len: Length in bytes
1300 * Callback from qed_find_cluster().
1302 static void qed_aio_read_data(void *opaque, int ret,
1303 uint64_t offset, size_t len)
1305 QEDAIOCB *acb = opaque;
1306 BDRVQEDState *s = acb_to_s(acb);
1307 BlockDriverState *bs = acb->common.bs;
1309 /* Adjust offset into cluster */
1310 offset += qed_offset_into_cluster(s, acb->cur_pos);
1312 trace_qed_aio_read_data(s, acb, ret, offset, len);
1318 qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
1320 /* Handle zero cluster and backing file reads */
1321 if (ret == QED_CLUSTER_ZERO) {
1322 qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
1323 qed_aio_next_io(acb, 0);
1325 } else if (ret != QED_CLUSTER_FOUND) {
1326 qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
1327 &acb->backing_qiov, qed_aio_next_io, acb);
1331 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
1332 bdrv_aio_readv(bs->file->bs, offset / BDRV_SECTOR_SIZE,
1333 &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
1334 qed_aio_next_io, acb);
1338 qed_aio_complete(acb, ret);
1342 * Begin next I/O or complete the request
1344 static void qed_aio_next_io(void *opaque, int ret)
1346 QEDAIOCB *acb = opaque;
1347 BDRVQEDState *s = acb_to_s(acb);
1348 QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
1349 qed_aio_write_data : qed_aio_read_data;
1351 trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);
1353 if (acb->backing_qiov) {
1354 qemu_iovec_destroy(acb->backing_qiov);
1355 g_free(acb->backing_qiov);
1356 acb->backing_qiov = NULL;
1359 /* Handle I/O error */
1361 qed_aio_complete(acb, ret);
1365 acb->qiov_offset += acb->cur_qiov.size;
1366 acb->cur_pos += acb->cur_qiov.size;
1367 qemu_iovec_reset(&acb->cur_qiov);
1369 /* Complete request */
1370 if (acb->cur_pos >= acb->end_pos) {
1371 qed_aio_complete(acb, 0);
1375 /* Find next cluster and start I/O */
1376 qed_find_cluster(s, &acb->request,
1377 acb->cur_pos, acb->end_pos - acb->cur_pos,
1381 static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
1383 QEMUIOVector *qiov, int nb_sectors,
1384 BlockCompletionFunc *cb,
1385 void *opaque, int flags)
1387 QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);
1389 trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
1394 acb->qiov_offset = 0;
1395 acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
1396 acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
1397 acb->backing_qiov = NULL;
1398 acb->request.l2_table = NULL;
1399 qemu_iovec_init(&acb->cur_qiov, qiov->niov);
1402 qed_aio_next_io(acb, 0);
1403 return &acb->common;
1406 static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
1408 QEMUIOVector *qiov, int nb_sectors,
1409 BlockCompletionFunc *cb,
1412 return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
1415 static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
1417 QEMUIOVector *qiov, int nb_sectors,
1418 BlockCompletionFunc *cb,
1421 return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
1422 opaque, QED_AIOCB_WRITE);
1431 static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
1433 QEDWriteZeroesCB *cb = opaque;
1438 qemu_coroutine_enter(cb->co, NULL);
1442 static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
1445 BdrvRequestFlags flags)
1447 BlockAIOCB *blockacb;
1448 BDRVQEDState *s = bs->opaque;
1449 QEDWriteZeroesCB cb = { .done = false };
1453 /* Refuse if there are untouched backing file sectors */
1455 if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
1458 if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
1463 /* Zero writes start without an I/O buffer. If a buffer becomes necessary
1464 * then it will be allocated during request processing.
1466 iov.iov_base = NULL,
1467 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE,
1469 qemu_iovec_init_external(&qiov, &iov, 1);
1470 blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
1471 qed_co_write_zeroes_cb, &cb,
1472 QED_AIOCB_WRITE | QED_AIOCB_ZERO);
1477 cb.co = qemu_coroutine_self();
1478 qemu_coroutine_yield();
1484 static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
1486 BDRVQEDState *s = bs->opaque;
1487 uint64_t old_image_size;
1490 if (!qed_is_image_size_valid(offset, s->header.cluster_size,
1491 s->header.table_size)) {
1495 /* Shrinking is currently not supported */
1496 if ((uint64_t)offset < s->header.image_size) {
1500 old_image_size = s->header.image_size;
1501 s->header.image_size = offset;
1502 ret = qed_write_header_sync(s);
1504 s->header.image_size = old_image_size;
1509 static int64_t bdrv_qed_getlength(BlockDriverState *bs)
1511 BDRVQEDState *s = bs->opaque;
1512 return s->header.image_size;
1515 static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
1517 BDRVQEDState *s = bs->opaque;
1519 memset(bdi, 0, sizeof(*bdi));
1520 bdi->cluster_size = s->header.cluster_size;
1521 bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
1522 bdi->unallocated_blocks_are_zero = true;
1523 bdi->can_write_zeroes_with_unmap = true;
1527 static int bdrv_qed_change_backing_file(BlockDriverState *bs,
1528 const char *backing_file,
1529 const char *backing_fmt)
1531 BDRVQEDState *s = bs->opaque;
1532 QEDHeader new_header, le_header;
1534 size_t buffer_len, backing_file_len;
1537 /* Refuse to set backing filename if unknown compat feature bits are
1538 * active. If the image uses an unknown compat feature then we may not
1539 * know the layout of data following the header structure and cannot safely add a new string */
1542 if (backing_file && (s->header.compat_features &
1543 ~QED_COMPAT_FEATURE_MASK)) {
1547 memcpy(&new_header, &s->header, sizeof(new_header));
1549 new_header.features &= ~(QED_F_BACKING_FILE |
1550 QED_F_BACKING_FORMAT_NO_PROBE);
1552 /* Adjust feature flags */
1554 new_header.features |= QED_F_BACKING_FILE;
1556 if (qed_fmt_is_raw(backing_fmt)) {
1557 new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
1561 /* Calculate new header size */
1562 backing_file_len = 0;
1565 backing_file_len = strlen(backing_file);
1568 buffer_len = sizeof(new_header);
1569 new_header.backing_filename_offset = buffer_len;
1570 new_header.backing_filename_size = backing_file_len;
1571 buffer_len += backing_file_len;
1573 /* Make sure we can rewrite header without failing */
1574 if (buffer_len > new_header.header_size * new_header.cluster_size) {
1578 /* Prepare new header */
1579 buffer = g_malloc(buffer_len);
1581 qed_header_cpu_to_le(&new_header, &le_header);
1582 memcpy(buffer, &le_header, sizeof(le_header));
1583 buffer_len = sizeof(le_header);
1586 memcpy(buffer + buffer_len, backing_file, backing_file_len);
1587 buffer_len += backing_file_len;
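/* At this point buffer contains the little-endian header followed directly by
 * the raw backing filename bytes (no NUL terminator), and the check above
 * guarantees this still fits within the header_size * cluster_size region
 * reserved at the front of the image.
 */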
1590 /* Write new header */
1591 ret = bdrv_pwrite_sync(bs->file->bs, 0, buffer, buffer_len);
1594 memcpy(&s->header, &new_header, sizeof(new_header));
1599 static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
1601 BDRVQEDState *s = bs->opaque;
1602 Error *local_err = NULL;
1607 bdrv_invalidate_cache(bs->file->bs, &local_err);
1609 error_propagate(errp, local_err);
1613 memset(s, 0, sizeof(BDRVQEDState));
1614 ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
1616 error_propagate(errp, local_err);
1617 error_prepend(errp, "Could not reopen qed layer: ");
1619 } else if (ret < 0) {
1620 error_setg_errno(errp, -ret, "Could not reopen qed layer");
1625 static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
1628 BDRVQEDState *s = bs->opaque;
1630 return qed_check(s, result, !!fix);
1633 static QemuOptsList qed_create_opts = {
1634 .name = "qed-create-opts",
1635 .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
1638 .name = BLOCK_OPT_SIZE,
1639 .type = QEMU_OPT_SIZE,
1640 .help = "Virtual disk size"
1643 .name = BLOCK_OPT_BACKING_FILE,
1644 .type = QEMU_OPT_STRING,
1645 .help = "File name of a base image"
1648 .name = BLOCK_OPT_BACKING_FMT,
1649 .type = QEMU_OPT_STRING,
1650 .help = "Image format of the base image"
1653 .name = BLOCK_OPT_CLUSTER_SIZE,
1654 .type = QEMU_OPT_SIZE,
1655 .help = "Cluster size (in bytes)",
1656 .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
1659 .name = BLOCK_OPT_TABLE_SIZE,
1660 .type = QEMU_OPT_SIZE,
1661 .help = "L1/L2 table size (in clusters)"
1663 { /* end of list */ }
1667 static BlockDriver bdrv_qed = {
1668 .format_name = "qed",
1669 .instance_size = sizeof(BDRVQEDState),
1670 .create_opts = &qed_create_opts,
1671 .supports_backing = true,
1673 .bdrv_probe = bdrv_qed_probe,
1674 .bdrv_open = bdrv_qed_open,
1675 .bdrv_close = bdrv_qed_close,
1676 .bdrv_reopen_prepare = bdrv_qed_reopen_prepare,
1677 .bdrv_create = bdrv_qed_create,
1678 .bdrv_has_zero_init = bdrv_has_zero_init_1,
1679 .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
1680 .bdrv_aio_readv = bdrv_qed_aio_readv,
1681 .bdrv_aio_writev = bdrv_qed_aio_writev,
1682 .bdrv_co_write_zeroes = bdrv_qed_co_write_zeroes,
1683 .bdrv_truncate = bdrv_qed_truncate,
1684 .bdrv_getlength = bdrv_qed_getlength,
1685 .bdrv_get_info = bdrv_qed_get_info,
1686 .bdrv_refresh_limits = bdrv_qed_refresh_limits,
1687 .bdrv_change_backing_file = bdrv_qed_change_backing_file,
1688 .bdrv_invalidate_cache = bdrv_qed_invalidate_cache,
1689 .bdrv_check = bdrv_qed_check,
1690 .bdrv_detach_aio_context = bdrv_qed_detach_aio_context,
1691 .bdrv_attach_aio_context = bdrv_qed_attach_aio_context,
1692 .bdrv_drain = bdrv_qed_drain,
1695 static void bdrv_qed_init(void)
1697 bdrv_register(&bdrv_qed);
1700 block_init(bdrv_qed_init);