" 'snapshot_param' is param used for internal snapshot, format\n"
" is 'snapshot.id=[ID],snapshot.name=[NAME]', or\n"
" '[ID_OR_NAME]'\n"
- " 'snapshot_id_or_name' is deprecated, use 'snapshot_param'\n"
- " instead\n"
" '-c' indicates that target image must be compressed (qcow format only)\n"
" '-u' allows unsafe backing chains. For rebasing, it is assumed that old and\n"
" new backing file match exactly. The image doesn't need a working\n"
return 1;
}
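+    /* A format driver without create_opts (e.g. a read-only format) cannot
+     * create images */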
+ if (!drv->create_opts) {
+ error_report("Format driver '%s' does not support image creation", fmt);
+ return 1;
+ }
+
create_opts = qemu_opts_append(create_opts, drv->create_opts);
if (filename) {
proto_drv = bdrv_find_protocol(filename, true, &local_err);
qemu_opts_free(create_opts);
return 1;
}
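+        /* Likewise, the protocol driver may not support image creation */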
+ if (!proto_drv->create_opts) {
+ error_report("Protocal driver '%s' does not support image creation",
+ proto_drv->format_name);
+ return 1;
+ }
create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
}
+ printf("Supported options:\n");
qemu_opts_print_help(create_opts);
qemu_opts_free(create_opts);
return 0;
aio_context_acquire(aio_context);
job_ref(&job->job);
do {
+ float progress = 0.0f;
aio_poll(aio_context, true);
- qemu_progress_print(job->len ?
- ((float)job->offset / job->len * 100.f) : 0.0f, 0);
- } while (!job->ready && !job_is_completed(&job->job));
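+        /* Guard against division by zero: progress_total may still be 0
+         * before the job has published a length */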
+ if (job->job.progress_total) {
+ progress = (float)job->job.progress_current /
+ job->job.progress_total * 100.f;
+ }
+ qemu_progress_print(progress, 0);
+ } while (!job_is_ready(&job->job) && !job_is_completed(&job->job));
if (!job_is_completed(&job->job)) {
- ret = block_job_complete_sync(job, errp);
+ ret = job_complete_sync(&job->job, errp);
} else {
- ret = job->ret;
+ ret = job->job.ret;
}
job_unref(&job->job);
aio_context_release(aio_context);
*
* 'pnum' is set to the number of sectors (including and immediately following
* the first one) that are known to be in the same allocated/unallocated state.
+ * The function will try to align the end offset to alignment boundaries so
+ * that the request will at least end aligned and consecutive requests will
+ * also start at an aligned offset.
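+ * For example, with alignment = 8 (i.e. 4 KiB), a data run ending at
+ * sector 13 is extended to end at sector 16, while a zero run ending
+ * there is shrunk to end at sector 8.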
*/
-static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum)
+static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum,
+ int64_t sector_num, int alignment)
{
bool is_zero;
- int i;
+ int i, tail;
if (n <= 0) {
*pnum = 0;
break;
}
}
+
+ tail = (sector_num + i) & (alignment - 1);
+ if (tail) {
+ if (is_zero && i <= tail) {
+ /* treat unallocated areas which only consist
+ * of a small tail as allocated. */
+ is_zero = false;
+ }
+ if (!is_zero) {
+ /* align up end offset of allocated areas. */
+ i += alignment - tail;
+ i = MIN(i, n);
+ } else {
+ /* align down end offset of zero areas. */
+ i -= tail;
+ }
+ }
*pnum = i;
return !is_zero;
}
* breaking up write requests for only small sparse areas.
*/
static int is_allocated_sectors_min(const uint8_t *buf, int n, int *pnum,
- int min)
+ int min, int64_t sector_num, int alignment)
{
int ret;
int num_checked, num_used;
min = n;
}
- ret = is_allocated_sectors(buf, n, pnum);
+ ret = is_allocated_sectors(buf, n, pnum, sector_num, alignment);
if (!ret) {
return ret;
}
num_used = *pnum;
buf += BDRV_SECTOR_SIZE * *pnum;
n -= *pnum;
+ sector_num += *pnum;
num_checked = num_used;
while (n > 0) {
- ret = is_allocated_sectors(buf, n, pnum);
+ ret = is_allocated_sectors(buf, n, pnum, sector_num, alignment);
buf += BDRV_SECTOR_SIZE * *pnum;
n -= *pnum;
+ sector_num += *pnum;
num_checked += *pnum;
if (ret) {
num_used = num_checked;
BlockBackend *target;
bool has_zero_init;
bool compressed;
+ bool unallocated_blocks_are_zero;
bool target_has_backing;
+ int64_t target_backing_sectors; /* negative if unknown */
bool wr_in_order;
+ bool copy_range;
int min_sparse;
+ int alignment;
size_t cluster_sectors;
size_t buf_sectors;
long num_coroutines;
{
int64_t src_cur_offset;
int ret, n, src_cur;
+ bool post_backing_zero = false;
convert_select_part(s, sector_num, &src_cur, &src_cur_offset);
assert(s->total_sectors > sector_num);
n = MIN(s->total_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
+ if (s->target_backing_sectors >= 0) {
+ if (sector_num >= s->target_backing_sectors) {
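+            /* Past the end of the target's backing file the target reads
+             * as zeroes only if the driver guarantees that unallocated
+             * blocks are zero */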
+ post_backing_zero = s->unallocated_blocks_are_zero;
+ } else if (sector_num + n > s->target_backing_sectors) {
+ /* Split requests around target_backing_sectors (because
+ * starting from there, zeros are handled differently) */
+ n = s->target_backing_sectors - sector_num;
+ }
+ }
+
if (s->sector_next_status <= sector_num) {
int64_t count = n * BDRV_SECTOR_SIZE;
n = DIV_ROUND_UP(count, BDRV_SECTOR_SIZE);
if (ret & BDRV_BLOCK_ZERO) {
- s->status = BLK_ZERO;
+ s->status = post_backing_zero ? BLK_BACKING_FILE : BLK_ZERO;
} else if (ret & BDRV_BLOCK_DATA) {
s->status = BLK_DATA;
} else {
* zeroed. */
if (!s->min_sparse ||
(!s->compressed &&
- is_allocated_sectors_min(buf, n, &n, s->min_sparse)) ||
+ is_allocated_sectors_min(buf, n, &n, s->min_sparse,
+ sector_num, s->alignment)) ||
(s->compressed &&
!buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)))
{
return 0;
}
+static int coroutine_fn convert_co_copy_range(ImgConvertState *s, int64_t sector_num,
+ int nb_sectors)
+{
+ int n, ret;
+
+ while (nb_sectors > 0) {
+ BlockBackend *blk;
+ int src_cur;
+ int64_t bs_sectors, src_cur_offset;
+ int64_t offset;
+
+ convert_select_part(s, sector_num, &src_cur, &src_cur_offset);
+ offset = (sector_num - src_cur_offset) << BDRV_SECTOR_BITS;
+ blk = s->src[src_cur];
+ bs_sectors = s->src_sectors[src_cur];
+
+ n = MIN(nb_sectors, bs_sectors - (sector_num - src_cur_offset));
+
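+        /* Let the block layer offload the copy (e.g. via copy_file_range())
+         * when source and target support it */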
+ ret = blk_co_copy_range(blk, offset, s->target,
+ sector_num << BDRV_SECTOR_BITS,
+ n << BDRV_SECTOR_BITS, 0, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ sector_num += n;
+ nb_sectors -= n;
+ }
+ return 0;
+}
+
static void coroutine_fn convert_co_do_copy(void *opaque)
{
ImgConvertState *s = opaque;
int n;
int64_t sector_num;
enum ImgConvertBlockStatus status;
+ bool copy_range;
qemu_co_mutex_lock(&s->lock);
if (s->ret != -EINPROGRESS || s->sector_num >= s->total_sectors) {
s->allocated_sectors, 0);
}
- if (status == BLK_DATA) {
+retry:
+        /* Use the local snapshot of the block status; s->status is shared
+         * state and may have been updated by another coroutine by now */
+        copy_range = s->copy_range && status == BLK_DATA;
+ if (status == BLK_DATA && !copy_range) {
ret = convert_co_read(s, sector_num, n, buf);
if (ret < 0) {
error_report("error while reading sector %" PRId64
}
if (s->ret == -EINPROGRESS) {
- ret = convert_co_write(s, sector_num, n, buf, status);
+ if (copy_range) {
+ ret = convert_co_copy_range(s, sector_num, n);
+ if (ret) {
+ s->copy_range = false;
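+                    /* Offloading failed; permanently fall back to the
+                     * read/write path and redo this range */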
+ goto retry;
+ }
+ } else {
+ ret = convert_co_write(s, sector_num, n, buf, status);
+ }
if (ret < 0) {
error_report("error while writing sector %" PRId64
": %s", sector_num, strerror(-ret));
ImgConvertState s = (ImgConvertState) {
/* Need at least 4k of zeros for sparse detection */
.min_sparse = 8,
+ .copy_range = true,
.buf_sectors = IO_BUF_SIZE / BDRV_SECTOR_SIZE,
.wr_in_order = true,
.num_coroutines = 8,
{"target-image-opts", no_argument, 0, OPTION_TARGET_IMAGE_OPTS},
{0, 0, 0, 0}
};
- c = getopt_long(argc, argv, ":hf:O:B:co:s:l:S:pt:T:qnm:WU",
+ c = getopt_long(argc, argv, ":hf:O:B:co:l:S:pt:T:qnm:WU",
long_options, NULL);
if (c == -1) {
break;
break;
case 'c':
s.compressed = true;
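+            /* Copy offloading cannot produce compressed output */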
+ s.copy_range = false;
break;
case 'o':
if (!is_valid_option_list(optarg)) {
g_free(old_options);
}
break;
- case 's':
- snapshot_name = optarg;
- break;
case 'l':
if (strstart(optarg, SNAPSHOT_OPT_BASE, NULL)) {
sn_opts = qemu_opts_parse_noisily(&internal_snapshot_opts,
}
s.min_sparse = sval / BDRV_SECTOR_SIZE;
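+            /* An explicit -S value is honoured only by the read/write path,
+             * where sparse detection happens, so disable copy offloading */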
+ s.copy_range = false;
break;
}
case 'p':
goto fail_getopt;
}
- if (!s.wr_in_order && s.compressed) {
- error_report("Out of order write and compress are mutually exclusive");
- goto fail_getopt;
- }
-
if (tgt_image_opts && !skip_create) {
error_report("--target-image-opts requires use of -n flag");
goto fail_getopt;
out_bs->bl.pdiscard_alignment >>
BDRV_SECTOR_BITS)));
+ /* try to align the write requests to the destination to avoid unnecessary
+ * RMW cycles. */
+ s.alignment = MAX(pow2floor(s.min_sparse),
+ DIV_ROUND_UP(out_bs->bl.request_alignment,
+ BDRV_SECTOR_SIZE));
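+    /* MAX of two powers of two (request_alignment is expected to be one)
+     * is itself a power of two */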
+ assert(is_power_of_2(s.alignment));
+
if (skip_create) {
int64_t output_sectors = blk_nb_sectors(s.target);
if (output_sectors < 0) {
}
}
+ if (s.target_has_backing) {
+ /* Errors are treated as "backing length unknown" (which means
+ * s.target_backing_sectors has to be negative, which it will
+ * be automatically). The backing file length is used only
+ * for optimizations, so such a case is not fatal. */
+ s.target_backing_sectors = bdrv_nb_sectors(out_bs->backing->bs);
+ } else {
+ s.target_backing_sectors = -1;
+ }
+
ret = bdrv_get_info(out_bs, &bdi);
if (ret < 0) {
if (s.compressed) {
} else {
s.compressed = s.compressed || bdi.needs_compressed_writes;
s.cluster_sectors = bdi.cluster_size / BDRV_SECTOR_SIZE;
+ s.unallocated_blocks_are_zero = bdi.unallocated_blocks_are_zero;
}
ret = convert_do_copy(&s);
int64_t n;
/* Probe up to 1 GiB at a time. */
- n = QEMU_ALIGN_DOWN(MIN(1 << 30, length - offset), BDRV_SECTOR_SIZE);
+ n = MIN(1 << 30, length - offset);
ret = get_block_status(bs, offset, n, &next);
if (ret < 0) {
}
if (out_baseimg[0]) {
+ const char *overlay_filename;
+ char *out_real_path;
+
options = qdict_new();
if (out_basefmt) {
qdict_put_str(options, "driver", out_basefmt);
qdict_put_bool(options, BDRV_OPT_FORCE_SHARE, true);
}
- blk_new_backing = blk_new_open(out_baseimg, NULL,
+ overlay_filename = bs->exact_filename[0] ? bs->exact_filename
+ : bs->filename;
+ out_real_path = g_malloc(PATH_MAX);
+
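+        /* Resolve the new backing file name relative to the overlay, the
+         * same way it will be resolved when the overlay is opened later */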
+ bdrv_get_full_backing_filename_from_filename(overlay_filename,
+ out_baseimg,
+ out_real_path,
+ PATH_MAX,
+ &local_err);
+ if (local_err) {
+ error_reportf_err(local_err,
+ "Could not resolve backing filename: ");
+ ret = -1;
+ g_free(out_real_path);
+ goto out;
+ }
+
+ blk_new_backing = blk_new_open(out_real_path, NULL,
options, src_flags, &local_err);
+ g_free(out_real_path);
if (!blk_new_backing) {
error_reportf_err(local_err,
"Could not open new backing file '%s': ",
qemu_progress_print(100.f * offset / total_work_size, 0);
}
+static int print_amend_option_help(const char *format)
+{
+ BlockDriver *drv;
+
+ /* Find driver and parse its options */
+ drv = bdrv_find_format(format);
+ if (!drv) {
+ error_report("Unknown file format '%s'", format);
+ return 1;
+ }
+
+ if (!drv->bdrv_amend_options) {
+ error_report("Format driver '%s' does not support option amendment",
+ format);
+ return 1;
+ }
+
+ /* Every driver supporting amendment must have create_opts */
+ assert(drv->create_opts);
+
+ printf("Creation options for '%s':\n", format);
+ qemu_opts_print_help(drv->create_opts);
+ printf("\nNote that not all of these options may be amendable.\n");
+ return 0;
+}
+
static int img_amend(int argc, char **argv)
{
Error *err = NULL;
if (fmt && has_help_option(options)) {
/* If a format is explicitly specified (and possibly no filename is
* given), print option help here */
- ret = print_block_option_help(filename, fmt);
+ ret = print_amend_option_help(fmt);
goto out;
}
if (has_help_option(options)) {
/* If the format was auto-detected, print option help here */
- ret = print_block_option_help(filename, fmt);
+ ret = print_amend_option_help(fmt);
goto out;
}
- if (!bs->drv->create_opts) {
- error_report("Format driver '%s' does not support any options to amend",
+ if (!bs->drv->bdrv_amend_options) {
+ error_report("Format driver '%s' does not support option amendment",
fmt);
ret = -1;
goto out;
}
+ /* Every driver supporting amendment must have create_opts */
+ assert(bs->drv->create_opts);
+
create_opts = qemu_opts_append(create_opts, bs->drv->create_opts);
opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
qemu_opts_do_parse(opts, options, NULL, &err);
/* In case the driver does not call amend_status_cb() */
qemu_progress_print(0.f, 0);
- ret = bdrv_amend_options(bs, opts, &amend_status_cb, NULL);
+ ret = bdrv_amend_options(bs, opts, &amend_status_cb, NULL, &err);
qemu_progress_print(100.f, 0);
if (ret < 0) {
- error_report("Error while amending options: %s", strerror(-ret));
+ error_report_err(err);
goto out;
}