#include "qemu/option.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
+#include "qemu/units.h"
#include "qom/object_interfaces.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
{
va_list ap;
- error_printf("qemu-img: ");
-
va_start(ap, fmt);
- error_vprintf(fmt, ap);
+ error_vreport(fmt, ap);
va_end(ap);
- error_printf("\nTry 'qemu-img --help' for more information\n");
+ error_printf("Try 'qemu-img --help' for more information\n");
exit(EXIT_FAILURE);
}
" 'skip=N' skip N bs-sized blocks at the start of input\n";
printf("%s\nSupported formats:", help_msg);
- bdrv_iterate_format(format_print, NULL);
+ bdrv_iterate_format(format_print, NULL, false);
printf("\n\n" QEMU_HELP_BOTTOM "\n");
exit(EXIT_SUCCESS);
}
return 1;
}
if (!proto_drv->create_opts) {
- error_report("Protocal driver '%s' does not support image creation",
+ error_report("Protocol driver '%s' does not support image creation",
proto_drv->format_name);
+ qemu_opts_free(create_opts);
return 1;
}
create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
}
- printf("Supported options:\n");
- qemu_opts_print_help(create_opts);
+ if (filename) {
+ printf("Supported options:\n");
+ } else {
+ printf("Supported %s options:\n", fmt);
+ }
+ qemu_opts_print_help(create_opts, false);
qemu_opts_free(create_opts);
+
+ if (!filename) {
+ printf("\n"
+ "The protocol level may support further options.\n"
+ "Specify the target filename to include those options.\n");
+ }
+
return 0;
}
return 0;
}
-static BlockBackend *img_open_new_file(const char *filename,
- QemuOpts *create_opts,
- const char *fmt, int flags,
- bool writethrough, bool quiet,
- bool force_share)
-{
- QDict *options = NULL;
-
- options = qdict_new();
- qemu_opt_foreach(create_opts, img_add_key_secrets, options, &error_abort);
-
- return img_open_file(filename, options, fmt, flags, writethrough, quiet,
- force_share);
-}
-
static BlockBackend *img_open(bool image_opts,
const char *filename,
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
goto fail;
}
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
return 1;
}
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
return 1;
}
}
job = block_job_get("commit");
+ assert(job);
run_block_job(job, &local_err);
if (local_err) {
goto unref_backing;
*
* 'pnum' is set to the number of sectors (including and immediately following
* the first one) that are known to be in the same allocated/unallocated state.
+ * The function will try to align the end offset to alignment boundaries so
+ * that the request will at least end at an aligned offset and consecutive
+ * requests will then also start at an aligned offset.
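+ * For example, with an alignment of 8 sectors, an allocated run that would
+ * end at sector 21 is padded to end at sector 24, whereas a zero run ending
+ * at sector 21 is trimmed back to end at sector 16 (zero runs no longer than
+ * the misaligned tail are instead treated as allocated).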
*/
-static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum)
+static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum,
+ int64_t sector_num, int alignment)
{
bool is_zero;
- int i;
+ int i, tail;
if (n <= 0) {
*pnum = 0;
break;
}
}
+
+ tail = (sector_num + i) & (alignment - 1);
+ if (tail) {
+ if (is_zero && i <= tail) {
+ /* treat unallocated areas which only consist
+ * of a small tail as allocated. */
+ is_zero = false;
+ }
+ if (!is_zero) {
+ /* align up end offset of allocated areas. */
+ i += alignment - tail;
+ i = MIN(i, n);
+ } else {
+ /* align down end offset of zero areas. */
+ i -= tail;
+ }
+ }
*pnum = i;
return !is_zero;
}
* breaking up write requests for only small sparse areas.
*/
static int is_allocated_sectors_min(const uint8_t *buf, int n, int *pnum,
- int min)
+ int min, int64_t sector_num, int alignment)
{
int ret;
int num_checked, num_used;
min = n;
}
- ret = is_allocated_sectors(buf, n, pnum);
+ ret = is_allocated_sectors(buf, n, pnum, sector_num, alignment);
if (!ret) {
return ret;
}
num_used = *pnum;
buf += BDRV_SECTOR_SIZE * *pnum;
n -= *pnum;
+ sector_num += *pnum;
num_checked = num_used;
while (n > 0) {
- ret = is_allocated_sectors(buf, n, pnum);
+ ret = is_allocated_sectors(buf, n, pnum, sector_num, alignment);
buf += BDRV_SECTOR_SIZE * *pnum;
n -= *pnum;
+ sector_num += *pnum;
num_checked += *pnum;
if (ret) {
num_used = num_checked;
return res;
}
-#define IO_BUF_SIZE (2 * 1024 * 1024)
+#define IO_BUF_SIZE (2 * MiB)
/*
* Check if passed sectors are empty (not allocated or contain only 0 bytes)
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
ret = 2;
goto out4;
}
bool wr_in_order;
bool copy_range;
int min_sparse;
+ int alignment;
size_t cluster_sectors;
size_t buf_sectors;
long num_coroutines;
count, &count, NULL, NULL);
}
if (ret < 0) {
+ error_report("error while reading block status of sector %" PRId64
+ ": %s", sector_num, strerror(-ret));
return ret;
}
n = DIV_ROUND_UP(count, BDRV_SECTOR_SIZE);
int nb_sectors, uint8_t *buf)
{
int n, ret;
- QEMUIOVector qiov;
- struct iovec iov;
assert(nb_sectors <= s->buf_sectors);
while (nb_sectors > 0) {
bs_sectors = s->src_sectors[src_cur];
n = MIN(nb_sectors, bs_sectors - (sector_num - src_cur_offset));
- iov.iov_base = buf;
- iov.iov_len = n << BDRV_SECTOR_BITS;
- qemu_iovec_init_external(&qiov, &iov, 1);
- ret = blk_co_preadv(
+ ret = blk_co_pread(
blk, (sector_num - src_cur_offset) << BDRV_SECTOR_BITS,
- n << BDRV_SECTOR_BITS, &qiov, 0);
+ n << BDRV_SECTOR_BITS, buf, 0);
if (ret < 0) {
return ret;
}
enum ImgConvertBlockStatus status)
{
int ret;
- QEMUIOVector qiov;
- struct iovec iov;
while (nb_sectors > 0) {
int n = nb_sectors;
* zeroed. */
if (!s->min_sparse ||
(!s->compressed &&
- is_allocated_sectors_min(buf, n, &n, s->min_sparse)) ||
+ is_allocated_sectors_min(buf, n, &n, s->min_sparse,
+ sector_num, s->alignment)) ||
(s->compressed &&
!buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)))
{
- iov.iov_base = buf;
- iov.iov_len = n << BDRV_SECTOR_BITS;
- qemu_iovec_init_external(&qiov, &iov, 1);
-
- ret = blk_co_pwritev(s->target, sector_num << BDRV_SECTOR_BITS,
- n << BDRV_SECTOR_BITS, &qiov, flags);
+ ret = blk_co_pwrite(s->target, sector_num << BDRV_SECTOR_BITS,
+ n << BDRV_SECTOR_BITS, buf, flags);
if (ret < 0) {
return ret;
}
}
ret = blk_co_pwrite_zeroes(s->target,
sector_num << BDRV_SECTOR_BITS,
- n << BDRV_SECTOR_BITS, 0);
+ n << BDRV_SECTOR_BITS,
+ BDRV_REQ_MAY_UNMAP);
if (ret < 0) {
return ret;
}
ret = blk_co_copy_range(blk, offset, s->target,
sector_num << BDRV_SECTOR_BITS,
- n << BDRV_SECTOR_BITS, 0);
+ n << BDRV_SECTOR_BITS, 0, 0);
if (ret < 0) {
return ret;
}
if (!s->has_zero_init && !s->target_has_backing &&
bdrv_can_write_zeroes_with_unmap(blk_bs(s->target)))
{
- ret = blk_make_zero(s->target, BDRV_REQ_MAY_UNMAP);
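+        /* BDRV_REQ_NO_FALLBACK: only pre-zero the target if the driver can do
+         * it efficiently, instead of falling back to writing zero buffers. */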
+ ret = blk_make_zero(s->target, BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK);
if (ret == 0) {
s->has_zero_init = true;
}
return s->ret;
}
+#define MAX_BUF_SECTORS 32768
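+/* 32768 512-byte sectors, i.e. 16 MiB; this caps both the convert I/O buffer
+ * size and the largest value accepted for -S. */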
+
static int img_convert(int argc, char **argv)
{
int c, bs_i, flags, src_flags = 0;
BlockDriverState *out_bs;
QemuOpts *opts = NULL, *sn_opts = NULL;
QemuOptsList *create_opts = NULL;
+ QDict *open_opts = NULL;
char *options = NULL;
Error *local_err = NULL;
bool writethrough, src_writethrough, quiet = false, image_opts = false,
skip_create = false, progress = false, tgt_image_opts = false;
int64_t ret = -EINVAL;
bool force_share = false;
+    bool explicit_min_sparse = false;
ImgConvertState s = (ImgConvertState) {
/* Need at least 4k of zeros for sparse detection */
.min_sparse = 8,
- .copy_range = true,
+ .copy_range = false,
.buf_sectors = IO_BUF_SIZE / BDRV_SECTOR_SIZE,
.wr_in_order = true,
.num_coroutines = 8,
{"target-image-opts", no_argument, 0, OPTION_TARGET_IMAGE_OPTS},
{0, 0, 0, 0}
};
- c = getopt_long(argc, argv, ":hf:O:B:co:l:S:pt:T:qnm:WU",
+ c = getopt_long(argc, argv, ":hf:O:B:Cco:l:S:pt:T:qnm:WU",
long_options, NULL);
if (c == -1) {
break;
case 'B':
out_baseimg = optarg;
break;
+ case 'C':
+ s.copy_range = true;
+ break;
case 'c':
s.compressed = true;
- s.copy_range = false;
break;
case 'o':
if (!is_valid_option_list(optarg)) {
int64_t sval;
sval = cvtnum(optarg);
- if (sval < 0) {
- error_report("Invalid minimum zero buffer size for sparse output specified");
+ if (sval < 0 || sval & (BDRV_SECTOR_SIZE - 1) ||
+ sval / BDRV_SECTOR_SIZE > MAX_BUF_SECTORS) {
+ error_report("Invalid buffer size for sparse output specified. "
+ "Valid sizes are multiples of %llu up to %llu. Select "
+ "0 to disable sparse detection (fully allocates output).",
+ BDRV_SECTOR_SIZE, MAX_BUF_SECTORS * BDRV_SECTOR_SIZE);
goto fail_getopt;
}
s.min_sparse = sval / BDRV_SECTOR_SIZE;
- s.copy_range = false;
+            explicit_min_sparse = true;
break;
}
case 'p':
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
+ goto fail_getopt;
+ }
+
+ if (s.compressed && s.copy_range) {
+ error_report("Cannot enable copy offloading when -c is used");
+ goto fail_getopt;
+ }
+
+    if (explicit_min_sparse && s.copy_range) {
+ error_report("Cannot enable copy offloading when -S is used");
goto fail_getopt;
}
}
}
+ /*
+ * The later open call will need any decryption secrets, and
+ * bdrv_create() will purge "opts", so extract them now before
+ * they are lost.
+ */
+ if (!skip_create) {
+ open_opts = qdict_new();
+ qemu_opt_foreach(opts, img_add_key_secrets, open_opts, &error_abort);
+ }
+
if (!skip_create) {
/* Create the new image */
ret = bdrv_create(drv, out_filename, opts, &local_err);
* That has to wait for bdrv_create to be improved
* to allow filenames in option syntax
*/
- s.target = img_open_new_file(out_filename, opts, out_fmt,
- flags, writethrough, quiet, false);
+ s.target = img_open_file(out_filename, open_opts, out_fmt,
+ flags, writethrough, quiet, false);
+ open_opts = NULL; /* blk_new_open will have freed it */
}
if (!s.target) {
ret = -1;
}
/* increase bufsectors from the default 4096 (2M) if opt_transfer
- * or discard_alignment of the out_bs is greater. Limit to 32768 (16MB)
- * as maximum. */
- s.buf_sectors = MIN(32768,
+     * or discard_alignment of the out_bs is greater. Limit to a maximum of
+     * MAX_BUF_SECTORS, which is currently 32768 (16 MB). */
+ s.buf_sectors = MIN(MAX_BUF_SECTORS,
MAX(s.buf_sectors,
MAX(out_bs->bl.opt_transfer >> BDRV_SECTOR_BITS,
out_bs->bl.pdiscard_alignment >>
BDRV_SECTOR_BITS)));
+    /* try to align the write requests to the destination to avoid unnecessary
+     * read-modify-write (RMW) cycles. */
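+    /* For example, the default min_sparse of 8 sectors combined with a
+     * destination request_alignment of 4096 bytes results in an alignment of
+     * 8 sectors, i.e. 4k. */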
+ s.alignment = MAX(pow2floor(s.min_sparse),
+ DIV_ROUND_UP(out_bs->bl.request_alignment,
+ BDRV_SECTOR_SIZE));
+ assert(is_power_of_2(s.alignment));
+
if (skip_create) {
int64_t output_sectors = blk_nb_sectors(s.target);
if (output_sectors < 0) {
qemu_opts_del(opts);
qemu_opts_free(create_opts);
qemu_opts_del(sn_opts);
+ qobject_unref(open_opts);
blk_unref(s.target);
if (s.src) {
for (bs_i = 0; bs_i < s.src_num; bs_i++) {
if (nb_sns <= 0)
return;
printf("Snapshot list:\n");
- bdrv_snapshot_dump(fprintf, stdout, NULL);
+ bdrv_snapshot_dump(NULL);
printf("\n");
for(i = 0; i < nb_sns; i++) {
sn = &sn_tab[i];
- bdrv_snapshot_dump(fprintf, stdout, sn);
+ bdrv_snapshot_dump(sn);
printf("\n");
}
g_free(sn_tab);
}
delim = true;
- bdrv_image_info_dump(fprintf, stdout, elem->value);
+ bdrv_image_info_dump(elem->value);
}
}
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
return 1;
}
return 0;
}
-static void dump_map_entry(OutputFormat output_format, MapEntry *e,
- MapEntry *next)
+static int dump_map_entry(OutputFormat output_format, MapEntry *e,
+ MapEntry *next)
{
switch (output_format) {
case OFORMAT_HUMAN:
if (e->data && !e->has_offset) {
error_report("File contains external, encrypted or compressed clusters.");
- exit(1);
+ return -1;
}
if (e->data && !e->zero) {
printf("%#-16"PRIx64"%#-16"PRIx64"%#-16"PRIx64"%s\n",
}
break;
}
+ return 0;
}
static int get_block_status(BlockDriverState *bs, int64_t offset,
BlockDriverState *file;
bool has_offset;
int64_t map;
+ char *filename = NULL;
/* As an optimization, we could cache the current range of unallocated
* clusters in each file of the chain, and avoid querying the same
has_offset = !!(ret & BDRV_BLOCK_OFFSET_VALID);
+ if (file && has_offset) {
+ bdrv_refresh_filename(file);
+ filename = file->filename;
+ }
+
*e = (MapEntry) {
.start = offset,
.length = bytes,
.offset = map,
.has_offset = has_offset,
.depth = depth,
- .has_filename = file && has_offset,
- .filename = file && has_offset ? file->filename : NULL,
+ .has_filename = filename,
+ .filename = filename,
};
return 0;
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
return 1;
}
int64_t n;
/* Probe up to 1 GiB at a time. */
- n = MIN(1 << 30, length - offset);
+ n = MIN(1 * GiB, length - offset);
ret = get_block_status(bs, offset, n, &next);
if (ret < 0) {
}
if (curr.length > 0) {
- dump_map_entry(output_format, &curr, &next);
+ ret = dump_map_entry(output_format, &curr, &next);
+ if (ret < 0) {
+ goto out;
+ }
}
curr = next;
}
- dump_map_entry(output_format, &curr, NULL);
+ ret = dump_map_entry(output_format, &curr, NULL);
out:
blk_unref(blk);
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
return 1;
}
break;
case SNAPSHOT_DELETE:
- bdrv_snapshot_delete_by_id_or_name(bs, snapshot_name, &err);
- if (err) {
- error_reportf_err(err, "Could not delete snapshot '%s': ",
- snapshot_name);
+ ret = bdrv_snapshot_find(bs, &sn, snapshot_name);
+ if (ret < 0) {
+ error_report("Could not delete snapshot '%s': snapshot not "
+ "found", snapshot_name);
ret = 1;
+ } else {
+ ret = bdrv_snapshot_delete(bs, sn.id_str, sn.name, &err);
+ if (ret < 0) {
+ error_reportf_err(err, "Could not delete snapshot '%s': ",
+ snapshot_name);
+ ret = 1;
+ }
}
break;
}
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
return 1;
}
char backing_name[PATH_MAX];
QDict *options = NULL;
- if (bs->backing_format[0] != '\0') {
- options = qdict_new();
- qdict_put_str(options, "driver", bs->backing_format);
- }
-
- if (force_share) {
- if (!options) {
+ if (bs->backing) {
+ if (bs->backing_format[0] != '\0') {
options = qdict_new();
+ qdict_put_str(options, "driver", bs->backing_format);
}
- qdict_put_bool(options, BDRV_OPT_FORCE_SHARE, true);
- }
- bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name));
- blk_old_backing = blk_new_open(backing_name, NULL,
- options, src_flags, &local_err);
- if (!blk_old_backing) {
- error_reportf_err(local_err,
- "Could not open old backing file '%s': ",
- backing_name);
- ret = -1;
- goto out;
+
+ if (force_share) {
+ if (!options) {
+ options = qdict_new();
+ }
+ qdict_put_bool(options, BDRV_OPT_FORCE_SHARE, true);
+ }
+ bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name));
+ blk_old_backing = blk_new_open(backing_name, NULL,
+ options, src_flags, &local_err);
+ if (!blk_old_backing) {
+ error_reportf_err(local_err,
+ "Could not open old backing file '%s': ",
+ backing_name);
+ ret = -1;
+ goto out;
+ }
+ } else {
+ blk_old_backing = NULL;
}
if (out_baseimg[0]) {
qdict_put_bool(options, BDRV_OPT_FORCE_SHARE, true);
}
+ bdrv_refresh_filename(bs);
overlay_filename = bs->exact_filename[0] ? bs->exact_filename
: bs->filename;
- out_real_path = g_malloc(PATH_MAX);
-
- bdrv_get_full_backing_filename_from_filename(overlay_filename,
- out_baseimg,
- out_real_path,
- PATH_MAX,
- &local_err);
+ out_real_path =
+ bdrv_get_full_backing_filename_from_filename(overlay_filename,
+ out_baseimg,
+ &local_err);
if (local_err) {
error_reportf_err(local_err,
"Could not resolve backing filename: ");
ret = -1;
- g_free(out_real_path);
goto out;
}
*/
if (!unsafe) {
int64_t size;
- int64_t old_backing_size;
+ int64_t old_backing_size = 0;
int64_t new_backing_size = 0;
uint64_t offset;
int64_t n;
ret = -1;
goto out;
}
- old_backing_size = blk_getlength(blk_old_backing);
- if (old_backing_size < 0) {
- char backing_name[PATH_MAX];
+ if (blk_old_backing) {
+ old_backing_size = blk_getlength(blk_old_backing);
+ if (old_backing_size < 0) {
+ char backing_name[PATH_MAX];
- bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name));
- error_report("Could not get size of '%s': %s",
- backing_name, strerror(-old_backing_size));
- ret = -1;
- goto out;
+ bdrv_get_backing_filename(bs, backing_name,
+ sizeof(backing_name));
+ error_report("Could not get size of '%s': %s",
+ backing_name, strerror(-old_backing_size));
+ ret = -1;
+ goto out;
+ }
}
if (blk_new_backing) {
new_backing_size = blk_getlength(blk_new_backing);
}
for (offset = 0; offset < size; offset += n) {
+ bool buf_old_is_zero = false;
+
/* How many bytes can we handle with the next read? */
n = MIN(IO_BUF_SIZE, size - offset);
*/
if (offset >= old_backing_size) {
memset(buf_old, 0, n);
+ buf_old_is_zero = true;
} else {
if (offset + n > old_backing_size) {
n = old_backing_size - offset;
if (compare_buffers(buf_old + written, buf_new + written,
n - written, &pnum))
{
- ret = blk_pwrite(blk, offset + written,
- buf_old + written, pnum, 0);
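+                /* If this chunk lies entirely beyond the end of the old
+                 * backing file, buf_old was simply zeroed; write explicit
+                 * zeroes so the target can stay sparse. */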
+ if (buf_old_is_zero) {
+ ret = blk_pwrite_zeroes(blk, offset + written, pnum, 0);
+ } else {
+ ret = blk_pwrite(blk, offset + written,
+ buf_old + written, pnum, 0);
+ }
if (ret < 0) {
error_report("Error while writing to COW image: %s",
strerror(-ret));
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
return 1;
}
assert(drv->create_opts);
printf("Creation options for '%s':\n", format);
- qemu_opts_print_help(drv->create_opts);
+ qemu_opts_print_help(drv->create_opts, false);
printf("\nNote that not all of these options may be amendable.\n");
return 0;
}
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
ret = -1;
goto out_no_progress;
}
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
ret = -1;
goto out;
}
if (qemu_opts_foreach(&qemu_object_opts,
user_creatable_add_opts_foreach,
- NULL, NULL)) {
+ NULL, &error_fatal)) {
goto out;
}
signal(SIGPIPE, SIG_IGN);
#endif
+ error_init(argv[0]);
module_call_init(MODULE_INIT_TRACE);
- error_set_progname(argv[0]);
qemu_init_exec_dir(argv[0]);
if (qemu_init_main_loop(&local_error)) {
return 0;
}
argv += optind;
- optind = 0;
+ qemu_reset_optind();
if (!trace_init_backends()) {
exit(1);