return 0;
}
-static BlockBackend *img_open_new_file(const char *filename,
- QemuOpts *create_opts,
- const char *fmt, int flags,
- bool writethrough, bool quiet,
- bool force_share)
-{
- QDict *options = NULL;
-
- options = qdict_new();
- qemu_opt_foreach(create_opts, img_add_key_secrets, options, &error_abort);
-
- return img_open_file(filename, options, fmt, flags, writethrough, quiet,
- force_share);
-}
-
static BlockBackend *img_open(bool image_opts,
const char *filename,
*
* 'pnum' is set to the number of sectors (including and immediately following
* the first one) that are known to be in the same allocated/unallocated state.
+ * The function will try to align the end offset to alignment boundaries so
+ * that the request will at least end aligned and consecutive requests will
+ * also start at an aligned offset.
*/
-static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum)
+static int is_allocated_sectors(const uint8_t *buf, int n, int *pnum,
+ int64_t sector_num, int alignment)
{
bool is_zero;
- int i;
+ int i, tail;

    if (n <= 0) {
        *pnum = 0;
        return 0;
    }

    is_zero = buffer_is_zero(buf, BDRV_SECTOR_SIZE);
    for (i = 1; i < n; i++) {
        buf += BDRV_SECTOR_SIZE;
        if (is_zero != buffer_is_zero(buf, BDRV_SECTOR_SIZE)) {
            break;
        }
    }
+
+ tail = (sector_num + i) & (alignment - 1);
+ if (tail) {
+ if (is_zero && i <= tail) {
+ /* treat unallocated areas which only consist
+ * of a small tail as allocated. */
+ is_zero = false;
+ }
+ if (!is_zero) {
+ /* align up end offset of allocated areas. */
+ i += alignment - tail;
+ i = MIN(i, n);
+ } else {
+ /* align down end offset of zero areas. */
+ i -= tail;
+ }
+ }
*pnum = i;
return !is_zero;
}
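
The effect of the new tail handling is easiest to see on concrete numbers. Below is a standalone sketch (plain C, no QEMU dependencies; round_run() is an invented name that only mirrors the arithmetic of the hunk above), showing how run lengths are rounded for alignment = 8 sectors:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirrors the tail rounding added to is_allocated_sectors() above: a run
 * of 'i' sectors starting at 'sector_num', either all zero (*is_zero) or
 * all allocated, is shortened or extended so that it ends on an
 * 'alignment' boundary.  'n' caps any extension. */
static int round_run(int i, int n, int64_t sector_num, int alignment,
                     bool *is_zero)
{
    int tail = (sector_num + i) & (alignment - 1);

    if (tail) {
        if (*is_zero && i <= tail) {
            /* a zero run that fits entirely in the tail is treated as
             * allocated */
            *is_zero = false;
        }
        if (!*is_zero) {
            /* align up the end offset of allocated runs, capped at n */
            i += alignment - tail;
            if (i > n) {
                i = n;
            }
        } else {
            /* align down the end offset of zero runs */
            i -= tail;
        }
    }
    return i;
}

int main(void)
{
    bool z;

    z = false;  /* allocated run of 3 sectors at offset 0 */
    printf("%d\n", round_run(3, 100, 0, 8, &z));   /* 8: end aligned up */

    z = true;   /* zero run of 13 sectors at offset 8 */
    printf("%d\n", round_run(13, 100, 8, 8, &z));  /* 8: end aligned down */

    z = true;   /* zero run of 3 sectors, no longer than the tail */
    printf("%d\n", round_run(3, 100, 8, 8, &z));   /* 8: reclassified as allocated */

    return 0;
}

The third case is what the "small tail" branch exists for: without it, a 3-sector hole would split an otherwise contiguous allocated region into multiple unaligned write requests.
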
* breaking up write requests for only small sparse areas.
*/
static int is_allocated_sectors_min(const uint8_t *buf, int n, int *pnum,
- int min)
+ int min, int64_t sector_num, int alignment)
{
int ret;
int num_checked, num_used;
min = n;
}
- ret = is_allocated_sectors(buf, n, pnum);
+ ret = is_allocated_sectors(buf, n, pnum, sector_num, alignment);
if (!ret) {
return ret;
}
num_used = *pnum;
buf += BDRV_SECTOR_SIZE * *pnum;
n -= *pnum;
+ sector_num += *pnum;
num_checked = num_used;
while (n > 0) {
- ret = is_allocated_sectors(buf, n, pnum);
+ ret = is_allocated_sectors(buf, n, pnum, sector_num, alignment);
buf += BDRV_SECTOR_SIZE * *pnum;
n -= *pnum;
+ sector_num += *pnum;
num_checked += *pnum;
if (ret) {
num_used = num_checked;
bool wr_in_order;
bool copy_range;
int min_sparse;
+ int alignment;
size_t cluster_sectors;
size_t buf_sectors;
long num_coroutines;
* zeroed. */
if (!s->min_sparse ||
(!s->compressed &&
- is_allocated_sectors_min(buf, n, &n, s->min_sparse)) ||
+ is_allocated_sectors_min(buf, n, &n, s->min_sparse,
+ sector_num, s->alignment)) ||
(s->compressed &&
!buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)))
{
ret = blk_co_copy_range(blk, offset, s->target,
sector_num << BDRV_SECTOR_BITS,
- n << BDRV_SECTOR_BITS, 0);
+ n << BDRV_SECTOR_BITS, 0, 0);
if (ret < 0) {
return ret;
}
return s->ret;
}
+/* Upper bound for the conversion I/O buffer and for -S: 32768 sectors
+ * of 512 bytes, i.e. 16 MiB. */
+#define MAX_BUF_SECTORS 32768
+
static int img_convert(int argc, char **argv)
{
int c, bs_i, flags, src_flags = 0;
BlockDriverState *out_bs;
QemuOpts *opts = NULL, *sn_opts = NULL;
QemuOptsList *create_opts = NULL;
+ QDict *open_opts = NULL;
char *options = NULL;
Error *local_err = NULL;
bool writethrough, src_writethrough, quiet = false, image_opts = false,
skip_create = false, progress = false, tgt_image_opts = false;
int64_t ret = -EINVAL;
bool force_share = false;
+    bool explicit_min_sparse = false;
ImgConvertState s = (ImgConvertState) {
/* Need at least 4k of zeros for sparse detection */
.min_sparse = 8,
- .copy_range = true,
+ .copy_range = false,
.buf_sectors = IO_BUF_SIZE / BDRV_SECTOR_SIZE,
.wr_in_order = true,
.num_coroutines = 8,
{"target-image-opts", no_argument, 0, OPTION_TARGET_IMAGE_OPTS},
{0, 0, 0, 0}
};
- c = getopt_long(argc, argv, ":hf:O:B:co:l:S:pt:T:qnm:WU",
+ c = getopt_long(argc, argv, ":hf:O:B:Cco:l:S:pt:T:qnm:WU",
+ c = getopt_long(argc, argv, ":hf:O:B:Cco:l:S:pt:T:qnm:WU",
long_options, NULL);
if (c == -1) {
break;
case 'B':
out_baseimg = optarg;
break;
+ case 'C':
+ s.copy_range = true;
+ break;
case 'c':
s.compressed = true;
- s.copy_range = false;
break;
case 'o':
if (!is_valid_option_list(optarg)) {
int64_t sval;
sval = cvtnum(optarg);
- if (sval < 0) {
- error_report("Invalid minimum zero buffer size for sparse output specified");
+ if (sval < 0 || sval & (BDRV_SECTOR_SIZE - 1) ||
+ sval / BDRV_SECTOR_SIZE > MAX_BUF_SECTORS) {
+ error_report("Invalid buffer size for sparse output specified. "
+ "Valid sizes are multiples of %llu up to %llu. Select "
+ "0 to disable sparse detection (fully allocates output).",
+ BDRV_SECTOR_SIZE, MAX_BUF_SECTORS * BDRV_SECTOR_SIZE);
goto fail_getopt;
}
s.min_sparse = sval / BDRV_SECTOR_SIZE;
- s.copy_range = false;
+            explicit_min_sparse = true;
break;
}
case 'p':
goto fail_getopt;
}
+ if (s.compressed && s.copy_range) {
+ error_report("Cannot enable copy offloading when -c is used");
+ goto fail_getopt;
+ }
+
+    if (explicit_min_sparse && s.copy_range) {
+ error_report("Cannot enable copy offloading when -S is used");
+ goto fail_getopt;
+ }
+
if (tgt_image_opts && !skip_create) {
error_report("--target-image-opts requires use of -n flag");
goto fail_getopt;
}
}
+ /*
+ * The later open call will need any decryption secrets, and
+ * bdrv_create() will purge "opts", so extract them now before
+ * they are lost.
+ */
+ if (!skip_create) {
+ open_opts = qdict_new();
+ qemu_opt_foreach(opts, img_add_key_secrets, open_opts, &error_abort);
+ }
+
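
The extraction above relies on img_add_key_secrets() (not shown in this diff) copying only the secret-related entries out of the create options. A standalone sketch of that filtering idea, assuming it matches on a "key-secret" name suffix as in current qemu-img.c; has_suffix() stands in for glib's g_str_has_suffix(), and the string table stands in for the QemuOpts that bdrv_create() will purge:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool has_suffix(const char *s, const char *suffix)
{
    size_t ls = strlen(s), lx = strlen(suffix);

    return ls >= lx && strcmp(s + ls - lx, suffix) == 0;
}

int main(void)
{
    /* hypothetical create options for an encrypted image */
    const char *create_opts[][2] = {
        { "size", "1G" },
        { "encrypt.format", "luks" },
        { "encrypt.key-secret", "sec0" },
    };
    size_t i;

    /* copy only what the later open call will need */
    for (i = 0; i < sizeof(create_opts) / sizeof(create_opts[0]); i++) {
        if (has_suffix(create_opts[i][0], "key-secret")) {
            printf("kept for open: %s=%s\n",
                   create_opts[i][0], create_opts[i][1]);
        }
    }
    return 0;
}
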
if (!skip_create) {
/* Create the new image */
ret = bdrv_create(drv, out_filename, opts, &local_err);
* That has to wait for bdrv_create to be improved
* to allow filenames in option syntax
*/
- s.target = img_open_new_file(out_filename, opts, out_fmt,
- flags, writethrough, quiet, false);
+ s.target = img_open_file(out_filename, open_opts, out_fmt,
+ flags, writethrough, quiet, false);
+ open_opts = NULL; /* blk_new_open will have freed it */
}
if (!s.target) {
ret = -1;
}
/* increase bufsectors from the default 4096 (2M) if opt_transfer
- * or discard_alignment of the out_bs is greater. Limit to 32768 (16MB)
- * as maximum. */
- s.buf_sectors = MIN(32768,
+ * or discard_alignment of the out_bs is greater. Limit to MAX_BUF_SECTORS
+ * (currently 32768 sectors, i.e. 16 MiB). */
+ s.buf_sectors = MIN(MAX_BUF_SECTORS,
MAX(s.buf_sectors,
MAX(out_bs->bl.opt_transfer >> BDRV_SECTOR_BITS,
out_bs->bl.pdiscard_alignment >>
BDRV_SECTOR_BITS)));
+ /* Try to align the write requests to the destination to avoid unnecessary
+ * read-modify-write (RMW) cycles. */
+ s.alignment = MAX(pow2floor(s.min_sparse),
+ DIV_ROUND_UP(out_bs->bl.request_alignment,
+ BDRV_SECTOR_SIZE));
+ assert(is_power_of_2(s.alignment));
+
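
For a feel of the resulting value: with the default -S setting (min_sparse = 8 sectors, i.e. 4k of zeros) and a destination whose request_alignment is 4096 bytes, both operands evaluate to 8 sectors. A standalone sketch with local stand-ins for QEMU's pow2floor(), DIV_ROUND_UP() and MAX():

#include <stdio.h>
#include <stdint.h>

/* local stand-ins for QEMU's macros/helpers, for illustration only */
#define BDRV_SECTOR_SIZE 512
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static int64_t pow2floor(int64_t value)
{
    /* clear the lowest set bit until only the highest remains */
    while (value & (value - 1)) {
        value &= value - 1;
    }
    return value;
}

int main(void)
{
    int min_sparse = 8;            /* default -S: 4k of consecutive zeros */
    int request_alignment = 4096;  /* e.g. a target with 4k sectors */
    int64_t alignment;

    alignment = MAX(pow2floor(min_sparse),
                    DIV_ROUND_UP(request_alignment, BDRV_SECTOR_SIZE));
    printf("alignment = %d sectors\n", (int)alignment);  /* -> 8 */
    return 0;
}

With -S 0, pow2floor(0) evaluates to 0, so the destination's request_alignment term alone determines the result, and the is_power_of_2() assertion holds as long as that alignment is itself a power of two.
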
if (skip_create) {
int64_t output_sectors = blk_nb_sectors(s.target);
if (output_sectors < 0) {
qemu_opts_del(opts);
qemu_opts_free(create_opts);
qemu_opts_del(sn_opts);
+ qobject_unref(open_opts);
blk_unref(s.target);
if (s.src) {
for (bs_i = 0; bs_i < s.src_num; bs_i++) {