+enum ImgConvertBlockStatus {
+    BLK_DATA,         /* allocated data that must be read and written */
+    BLK_ZERO,         /* reads as zeroes; nothing needs to be copied */
+    BLK_BACKING_FILE, /* unallocated; the target's backing file shows through */
+};
+
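+/* State shared by the convert_*() helpers during one conversion */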
+typedef struct ImgConvertState {
+    BlockBackend **src;            /* source image(s) */
+    int64_t *src_sectors;          /* size of each source image in sectors */
+    int src_cur, src_num;          /* current source image and total count */
+    int64_t src_cur_offset;        /* start of the current source, in sectors */
+    int64_t total_sectors;
+    int64_t allocated_sectors;     /* sectors with BLK_DATA status, for the
+                                    * progress report */
+    enum ImgConvertBlockStatus status;
+    int64_t sector_next_status;    /* sector at which to query the block
+                                    * status again */
+    BlockBackend *target;
+    bool has_zero_init;            /* target reads as zeroes without writes */
+    bool compressed;
+    bool target_has_backing;
+    int min_sparse;                /* minimum zero run, in sectors, to keep
+                                    * sparse (-S); 0 keeps the target fully
+                                    * allocated */
+    size_t cluster_sectors;        /* cluster size of a compressed target */
+    size_t buf_sectors;            /* size of the copy buffer in sectors */
+} ImgConvertState;
+
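+/* Advance s->src_cur/s->src_cur_offset to the source image that contains
+ * sector_num (the source images are treated as one concatenated input). */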
+static void convert_select_part(ImgConvertState *s, int64_t sector_num)
+{
+    assert(sector_num >= s->src_cur_offset);
+    while (sector_num - s->src_cur_offset >= s->src_sectors[s->src_cur]) {
+        s->src_cur_offset += s->src_sectors[s->src_cur];
+        s->src_cur++;
+        assert(s->src_cur < s->src_num);
+    }
+}
+
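+/* Determine how many sectors can be handled in one iteration starting at
+ * sector_num, and set s->status to the block status of that range. Returns
+ * the number of sectors on success, a negative errno on error. */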
+static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
+{
+    int64_t ret;
+    int n;
+
+    convert_select_part(s, sector_num);
+
+    assert(s->total_sectors > sector_num);
+    n = MIN(s->total_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
+
+    if (s->sector_next_status <= sector_num) {
+        ret = bdrv_get_block_status(blk_bs(s->src[s->src_cur]),
+                                    sector_num - s->src_cur_offset,
+                                    n, &n);
+        if (ret < 0) {
+            return ret;
+        }
+
+        if (ret & BDRV_BLOCK_ZERO) {
+            s->status = BLK_ZERO;
+        } else if (ret & BDRV_BLOCK_DATA) {
+            s->status = BLK_DATA;
+        } else if (!s->target_has_backing) {
+            /* Without a target backing file we must copy over the contents of
+             * the backing file as well. */
+            /* TODO Check block status of the backing file chain to avoid
+             * needlessly reading zeroes and limiting the iteration to the
+             * buffer size */
+            s->status = BLK_DATA;
+        } else {
+            s->status = BLK_BACKING_FILE;
+        }
+
+        s->sector_next_status = sector_num + n;
+    }
+
+    n = MIN(n, s->sector_next_status - sector_num);
+    if (s->status == BLK_DATA) {
+        n = MIN(n, s->buf_sectors);
+    }
+
+    /* We need to write complete clusters for compressed images, so if an
+     * unallocated area is shorter than that, we must consider the whole
+     * cluster allocated. */
+    if (s->compressed) {
+        if (n < s->cluster_sectors) {
+            n = MIN(s->cluster_sectors, s->total_sectors - sector_num);
+            s->status = BLK_DATA;
+        } else {
+            n = QEMU_ALIGN_DOWN(n, s->cluster_sectors);
+        }
+    }
+
+    return n;
+}
+
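+/* Read nb_sectors starting at sector_num from the source(s) into buf,
+ * crossing image boundaries where needed. BLK_ZERO and BLK_BACKING_FILE
+ * ranges are never read, so this is a no-op for them. */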
+static int convert_read(ImgConvertState *s, int64_t sector_num, int nb_sectors,
+                        uint8_t *buf)
+{
+    int n;
+    int ret;
+
+    if (s->status == BLK_ZERO || s->status == BLK_BACKING_FILE) {
+        return 0;
+    }
+
+    assert(nb_sectors <= s->buf_sectors);
+    while (nb_sectors > 0) {
+        BlockBackend *blk;
+        int64_t bs_sectors;
+
+        /* In the case of compression with multiple source files, we can get a
+         * nb_sectors that spreads into the next part. So we must be able to
+         * read across multiple BDSes for one convert_read() call. */
+        convert_select_part(s, sector_num);
+        blk = s->src[s->src_cur];
+        bs_sectors = s->src_sectors[s->src_cur];
+
+        n = MIN(nb_sectors, bs_sectors - (sector_num - s->src_cur_offset));
+        ret = blk_read(blk, sector_num - s->src_cur_offset, buf, n);
+        if (ret < 0) {
+            return ret;
+        }
+
+        sector_num += n;
+        nb_sectors -= n;
+        buf += n * BDRV_SECTOR_SIZE;
+    }
+
+    return 0;
+}
+
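+/* Write nb_sectors starting at sector_num from buf to the target, choosing
+ * the cheapest method s->status allows: leaving the range unallocated,
+ * writing zeroes, or writing (possibly compressed) data. */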
+static int convert_write(ImgConvertState *s, int64_t sector_num, int nb_sectors,
+                         const uint8_t *buf)
+{
+    int ret;
+
+    while (nb_sectors > 0) {
+        int n = nb_sectors;
+
+        switch (s->status) {
+        case BLK_BACKING_FILE:
+            /* If we have a backing file, leave clusters unallocated that are
+             * unallocated in the source image, so that the backing file is
+             * visible at the respective offset. */
+            assert(s->target_has_backing);
+            break;
+
+        case BLK_DATA:
+            /* We must always write compressed clusters as a whole, so don't
+             * try to find zeroed parts in the buffer. We can only save the
+             * write if the buffer is completely zeroed and we're allowed to
+             * keep the target sparse. */
+            if (s->compressed) {
+                if (s->has_zero_init && s->min_sparse &&
+                    buffer_is_zero(buf, n * BDRV_SECTOR_SIZE))
+                {
+                    assert(!s->target_has_backing);
+                    break;
+                }
+
+                ret = blk_write_compressed(s->target, sector_num, buf, n);
+                if (ret < 0) {
+                    return ret;
+                }
+                break;
+            }
+
+            /* If there is real non-zero data or we're told to keep the target
+             * fully allocated (-S 0), we must write it. Otherwise we can treat
+             * it as zero sectors. */
+            if (!s->min_sparse ||
+                is_allocated_sectors_min(buf, n, &n, s->min_sparse))
+            {
+                ret = blk_write(s->target, sector_num, buf, n);
+                if (ret < 0) {
+                    return ret;
+                }
+                break;
+            }
+            /* fall-through */
+
+        case BLK_ZERO:
+            if (s->has_zero_init) {
+                break;
+            }
+            ret = blk_write_zeroes(s->target, sector_num, n, 0);
+            if (ret < 0) {
+                return ret;
+            }
+            break;
+        }
+
+        sector_num += n;
+        nb_sectors -= n;
+        buf += n * BDRV_SECTOR_SIZE;
+    }
+
+    return 0;
+}
+
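+/* Main copy loop: first walk the image once to count the allocated sectors
+ * for the progress report, then copy the data chunk by chunk. Returns 0 on
+ * success, a negative errno on error. */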
+static int convert_do_copy(ImgConvertState *s)
+{
+    uint8_t *buf = NULL;
+    int64_t sector_num, allocated_done;
+    int ret;
+    int n;
+
+    /* Check whether we have zero initialisation or can get it efficiently */
+    s->has_zero_init = s->min_sparse && !s->target_has_backing
+                     ? bdrv_has_zero_init(blk_bs(s->target))
+                     : false;
+
+    if (!s->has_zero_init && !s->target_has_backing &&
+        bdrv_can_write_zeroes_with_unmap(blk_bs(s->target)))
+    {
+        ret = bdrv_make_zero(blk_bs(s->target), BDRV_REQ_MAY_UNMAP);
+        if (ret == 0) {
+            s->has_zero_init = true;
+        }
+    }
+
+    /* Allocate buffer for copied data. For compressed images, only one
+     * cluster can be copied at a time. */
+    if (s->compressed) {
+        if (s->cluster_sectors <= 0 || s->cluster_sectors > s->buf_sectors) {
+            error_report("invalid cluster size");
+            ret = -EINVAL;
+            goto fail;
+        }
+        s->buf_sectors = s->cluster_sectors;
+    }
+    buf = blk_blockalign(s->target, s->buf_sectors * BDRV_SECTOR_SIZE);
+
+    /* Calculate allocated sectors for progress */
+    s->allocated_sectors = 0;
+    sector_num = 0;
+    while (sector_num < s->total_sectors) {
+        n = convert_iteration_sectors(s, sector_num);
+        if (n < 0) {
+            ret = n;
+            goto fail;
+        }
+        if (s->status == BLK_DATA) {
+            s->allocated_sectors += n;
+        }
+        sector_num += n;
+    }
+
+    /* Do the copy */
+    s->src_cur = 0;
+    s->src_cur_offset = 0;
+    s->sector_next_status = 0;
+
+    sector_num = 0;
+    allocated_done = 0;
+
+    while (sector_num < s->total_sectors) {
+        n = convert_iteration_sectors(s, sector_num);
+        if (n < 0) {
+            ret = n;
+            goto fail;
+        }
+        if (s->status == BLK_DATA) {
+            allocated_done += n;
+            qemu_progress_print(100.0 * allocated_done / s->allocated_sectors,
+                                0);
+        }
+
+        ret = convert_read(s, sector_num, n, buf);
+        if (ret < 0) {
+            error_report("error while reading sector %" PRId64
+                         ": %s", sector_num, strerror(-ret));
+            goto fail;
+        }
+
+        ret = convert_write(s, sector_num, n, buf);
+        if (ret < 0) {
+            error_report("error while writing sector %" PRId64
+                         ": %s", sector_num, strerror(-ret));
+            goto fail;
+        }
+
+        sector_num += n;
+    }
+
+    if (s->compressed) {
+        /* signal EOF to align */
+        ret = blk_write_compressed(s->target, 0, NULL, 0);
+        if (ret < 0) {
+            goto fail;
+        }
+    }
+
+    ret = 0;
+fail:
+    qemu_vfree(buf);
+    return ret;
+}
+