* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
+
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qemu-common.h"
#include "qemu/error-report.h"
#include "block/block_int.h"
+#include "block/qdict.h"
#include "sysemu/block-backend.h"
#include "qemu/module.h"
+#include "qemu/option.h"
#include "qemu/bswap.h"
#include <zlib.h>
-#include "qapi/qmp/qerror.h"
-#include "crypto/cipher.h"
-#include "migration/migration.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/qmp/qstring.h"
+#include "qapi/qobject-input-visitor.h"
+#include "qapi/qapi-visit-block-core.h"
+#include "crypto/block.h"
+#include "migration/blocker.h"
+#include "crypto.h"
/**************************************************************/
/* QEMU COW block driver with compression and encryption support */
uint8_t *cluster_cache;
uint8_t *cluster_data;
uint64_t cluster_cache_offset;
- QCryptoCipher *cipher; /* NULL if no key yet */
+ QCryptoBlock *crypto; /* Disk encryption format driver */
uint32_t crypt_method_header;
CoMutex lock;
Error *migration_blocker;
} BDRVQcowState;
+static QemuOptsList qcow_create_opts;
+
static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset);
static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
return 0;
}
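+/* Options accepted at open time: currently just the ID of the secret
+ * (encrypt.key-secret) that supplies the AES key for legacy encrypted
+ * images. */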
+static QemuOptsList qcow_runtime_opts = {
+ .name = "qcow",
+ .head = QTAILQ_HEAD_INITIALIZER(qcow_runtime_opts.head),
+ .desc = {
+ BLOCK_CRYPTO_OPT_DEF_QCOW_KEY_SECRET("encrypt."),
+ { /* end of list */ }
+ },
+};
+
static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
int ret;
QCowHeader header;
Error *local_err = NULL;
+ QCryptoBlockOpenOptions *crypto_opts = NULL;
+ unsigned int cflags = 0;
+ QDict *encryptopts = NULL;
+ const char *encryptfmt;
+
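+    /* Pull all "encrypt." prefixed options into their own dict; they are
+     * parsed further down, once the header's crypt method is known. */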
+ qdict_extract_subqdict(options, &encryptopts, "encrypt.");
+ encryptfmt = qdict_get_try_str(encryptopts, "format");
bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
false, errp);
if (!bs->file) {
- return -EINVAL;
+ ret = -EINVAL;
+ goto fail;
}
ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
goto fail;
}
- if (header.crypt_method > QCOW_CRYPT_AES) {
- error_setg(errp, "invalid encryption method in qcow header");
- ret = -EINVAL;
- goto fail;
- }
- if (!qcrypto_cipher_supports(QCRYPTO_CIPHER_ALG_AES_128,
- QCRYPTO_CIPHER_MODE_CBC)) {
- error_setg(errp, "AES cipher not available");
- ret = -EINVAL;
- goto fail;
- }
s->crypt_method_header = header.crypt_method;
if (s->crypt_method_header) {
if (bdrv_uses_whitelist() &&
ret = -ENOSYS;
goto fail;
}
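+        /* Only the legacy AES-CBC method is recognised here; any other
+         * non-zero crypt method is rejected below. */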
+ if (s->crypt_method_header == QCOW_CRYPT_AES) {
+ if (encryptfmt && !g_str_equal(encryptfmt, "aes")) {
+ error_setg(errp,
+ "Header reported 'aes' encryption format but "
+ "options specify '%s'", encryptfmt);
+ ret = -EINVAL;
+ goto fail;
+ }
+ qdict_del(encryptopts, "format");
+ crypto_opts = block_crypto_open_opts_init(
+ Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp);
+ if (!crypto_opts) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ if (flags & BDRV_O_NO_IO) {
+ cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
+ }
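+            /* The legacy 'qcow' crypto format keeps no encryption header in
+             * the image (the key comes only from encrypt.key-secret), which
+             * is presumably why no read callback is passed here. */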
+ s->crypto = qcrypto_block_open(crypto_opts, "encrypt.",
+ NULL, NULL, cflags, errp);
+ if (!s->crypto) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else {
+ error_setg(errp, "invalid encryption method in qcow header");
+ ret = -EINVAL;
+ goto fail;
+ }
bs->encrypted = true;
+ } else {
+ if (encryptfmt) {
+ error_setg(errp, "No encryption in image header, but options "
+ "specified format '%s'", encryptfmt);
+ ret = -EINVAL;
+ goto fail;
+ }
}
s->cluster_bits = header.cluster_bits;
s->cluster_size = 1 << s->cluster_bits;
goto fail;
}
+ qobject_unref(encryptopts);
+ qapi_free_QCryptoBlockOpenOptions(crypto_opts);
qemu_co_mutex_init(&s->lock);
return 0;
qemu_vfree(s->l2_cache);
g_free(s->cluster_cache);
g_free(s->cluster_data);
+ qcrypto_block_free(s->crypto);
+ qobject_unref(encryptopts);
+ qapi_free_QCryptoBlockOpenOptions(crypto_opts);
return ret;
}
return 0;
}
-static int qcow_set_key(BlockDriverState *bs, const char *key)
-{
- BDRVQcowState *s = bs->opaque;
- uint8_t keybuf[16];
- int len, i;
- Error *err;
-
- memset(keybuf, 0, 16);
- len = strlen(key);
- if (len > 16)
- len = 16;
- /* XXX: we could compress the chars to 7 bits to increase
- entropy */
- for(i = 0;i < len;i++) {
- keybuf[i] = key[i];
- }
- assert(bs->encrypted);
-
- qcrypto_cipher_free(s->cipher);
- s->cipher = qcrypto_cipher_new(
- QCRYPTO_CIPHER_ALG_AES_128,
- QCRYPTO_CIPHER_MODE_CBC,
- keybuf, G_N_ELEMENTS(keybuf),
- &err);
-
- if (!s->cipher) {
- /* XXX would be nice if errors in this method could
- * be properly propagate to the caller. Would need
- * the bdrv_set_key() API signature to be fixed. */
- error_free(err);
- return -1;
- }
- return 0;
-}
-
-/* The crypt function is compatible with the linux cryptoloop
- algorithm for < 4 GB images. NOTE: out_buf == in_buf is
- supported */
-static int encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
- uint8_t *out_buf, const uint8_t *in_buf,
- int nb_sectors, bool enc, Error **errp)
-{
- union {
- uint64_t ll[2];
- uint8_t b[16];
- } ivec;
- int i;
- int ret;
-
- for(i = 0; i < nb_sectors; i++) {
- ivec.ll[0] = cpu_to_le64(sector_num);
- ivec.ll[1] = 0;
- if (qcrypto_cipher_setiv(s->cipher,
- ivec.b, G_N_ELEMENTS(ivec.b),
- errp) < 0) {
- return -1;
- }
- if (enc) {
- ret = qcrypto_cipher_encrypt(s->cipher,
- in_buf,
- out_buf,
- 512,
- errp);
- } else {
- ret = qcrypto_cipher_decrypt(s->cipher,
- in_buf,
- out_buf,
- 512,
- errp);
- }
- if (ret < 0) {
- return -1;
- }
- sector_num++;
- in_buf += 512;
- out_buf += 512;
- }
- return 0;
-}
/* 'allocate' is:
*
* 'compressed_size'. 'compressed_size' must be > 0 and <
* cluster_size
*
- * return 0 if not allocated.
+ * return 0 if not allocated, 1 if *result is assigned, and negative
+ * errno on failure.
*/
-static uint64_t get_cluster_offset(BlockDriverState *bs,
- uint64_t offset, int allocate,
- int compressed_size,
- int n_start, int n_end)
+static int get_cluster_offset(BlockDriverState *bs,
+ uint64_t offset, int allocate,
+ int compressed_size,
+ int n_start, int n_end, uint64_t *result)
{
BDRVQcowState *s = bs->opaque;
- int min_index, i, j, l1_index, l2_index;
- uint64_t l2_offset, *l2_table, cluster_offset, tmp;
+ int min_index, i, j, l1_index, l2_index, ret;
+ int64_t l2_offset;
+ uint64_t *l2_table, cluster_offset, tmp;
uint32_t min_count;
int new_l2_table;
+ *result = 0;
l1_index = offset >> (s->l2_bits + s->cluster_bits);
l2_offset = s->l1_table[l1_index];
new_l2_table = 0;
return 0;
/* allocate a new l2 entry */
l2_offset = bdrv_getlength(bs->file->bs);
+ if (l2_offset < 0) {
+ return l2_offset;
+ }
/* round to cluster size */
- l2_offset = (l2_offset + s->cluster_size - 1) & ~(s->cluster_size - 1);
+ l2_offset = QEMU_ALIGN_UP(l2_offset, s->cluster_size);
/* update the L1 entry */
s->l1_table[l1_index] = l2_offset;
tmp = cpu_to_be64(l2_offset);
- if (bdrv_pwrite_sync(bs->file,
- s->l1_table_offset + l1_index * sizeof(tmp),
- &tmp, sizeof(tmp)) < 0)
- return 0;
+ BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
+ ret = bdrv_pwrite_sync(bs->file,
+ s->l1_table_offset + l1_index * sizeof(tmp),
+ &tmp, sizeof(tmp));
+ if (ret < 0) {
+ return ret;
+ }
new_l2_table = 1;
}
for(i = 0; i < L2_CACHE_SIZE; i++) {
}
}
l2_table = s->l2_cache + (min_index << s->l2_bits);
+ BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
if (new_l2_table) {
memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
- if (bdrv_pwrite_sync(bs->file, l2_offset, l2_table,
- s->l2_size * sizeof(uint64_t)) < 0)
- return 0;
+ ret = bdrv_pwrite_sync(bs->file, l2_offset, l2_table,
+ s->l2_size * sizeof(uint64_t));
+ if (ret < 0) {
+ return ret;
+ }
} else {
- if (bdrv_pread(bs->file, l2_offset, l2_table,
- s->l2_size * sizeof(uint64_t)) !=
- s->l2_size * sizeof(uint64_t))
- return 0;
+ ret = bdrv_pread(bs->file, l2_offset, l2_table,
+ s->l2_size * sizeof(uint64_t));
+ if (ret < 0) {
+ return ret;
+ }
}
s->l2_cache_offsets[min_index] = l2_offset;
s->l2_cache_counts[min_index] = 1;
((cluster_offset & QCOW_OFLAG_COMPRESSED) && allocate == 1)) {
if (!allocate)
return 0;
+ BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
/* allocate a new cluster */
if ((cluster_offset & QCOW_OFLAG_COMPRESSED) &&
(n_end - n_start) < s->cluster_sectors) {
            /* if the cluster is already compressed, we must
               decompress it first, because it might not be
               completely overwritten */
- if (decompress_cluster(bs, cluster_offset) < 0)
- return 0;
+ if (decompress_cluster(bs, cluster_offset) < 0) {
+ return -EIO;
+ }
cluster_offset = bdrv_getlength(bs->file->bs);
- cluster_offset = (cluster_offset + s->cluster_size - 1) &
- ~(s->cluster_size - 1);
+ if ((int64_t) cluster_offset < 0) {
+ return cluster_offset;
+ }
+ cluster_offset = QEMU_ALIGN_UP(cluster_offset, s->cluster_size);
/* write the cluster content */
- if (bdrv_pwrite(bs->file, cluster_offset, s->cluster_cache,
- s->cluster_size) !=
- s->cluster_size)
- return -1;
+ BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
+ ret = bdrv_pwrite(bs->file, cluster_offset, s->cluster_cache,
+ s->cluster_size);
+ if (ret < 0) {
+ return ret;
+ }
} else {
cluster_offset = bdrv_getlength(bs->file->bs);
+ if ((int64_t) cluster_offset < 0) {
+ return cluster_offset;
+ }
if (allocate == 1) {
/* round to cluster size */
- cluster_offset = (cluster_offset + s->cluster_size - 1) &
- ~(s->cluster_size - 1);
- bdrv_truncate(bs->file, cluster_offset + s->cluster_size, NULL);
+ cluster_offset = QEMU_ALIGN_UP(cluster_offset, s->cluster_size);
+ if (cluster_offset + s->cluster_size > INT64_MAX) {
+ return -E2BIG;
+ }
+ ret = bdrv_truncate(bs->file, cluster_offset + s->cluster_size,
+ PREALLOC_MODE_OFF, NULL);
+ if (ret < 0) {
+ return ret;
+ }
            /* if encrypted, we must initialize the parts of the
               cluster that will not be written */
if (bs->encrypted &&
(n_end - n_start) < s->cluster_sectors) {
uint64_t start_sect;
- assert(s->cipher);
+ assert(s->crypto);
start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
- memset(s->cluster_data + 512, 0x00, 512);
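+                /* Sectors outside [n_start, n_end) are not written by the
+                 * caller, so store encrypted zeroes there to leave the new
+                 * cluster fully initialized. */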
for(i = 0; i < s->cluster_sectors; i++) {
if (i < n_start || i >= n_end) {
- Error *err = NULL;
- if (encrypt_sectors(s, start_sect + i,
- s->cluster_data,
- s->cluster_data + 512, 1,
- true, &err) < 0) {
- error_free(err);
- errno = EIO;
- return -1;
+ memset(s->cluster_data, 0x00, 512);
+ if (qcrypto_block_encrypt(s->crypto,
+ (start_sect + i) *
+ BDRV_SECTOR_SIZE,
+ s->cluster_data,
+ BDRV_SECTOR_SIZE,
+ NULL) < 0) {
+ return -EIO;
+ }
+ BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
+ ret = bdrv_pwrite(bs->file,
+ cluster_offset + i * 512,
+ s->cluster_data, 512);
+ if (ret < 0) {
+ return ret;
}
- if (bdrv_pwrite(bs->file,
- cluster_offset + i * 512,
- s->cluster_data, 512) != 512)
- return -1;
}
}
}
/* update L2 table */
tmp = cpu_to_be64(cluster_offset);
l2_table[l2_index] = tmp;
- if (bdrv_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp),
- &tmp, sizeof(tmp)) < 0)
- return 0;
+ if (allocate == 2) {
+ BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
+ } else {
+ BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
+ }
+ ret = bdrv_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp),
+ &tmp, sizeof(tmp));
+ if (ret < 0) {
+ return ret;
+ }
}
- return cluster_offset;
+ *result = cluster_offset;
+ return 1;
}
-static int64_t coroutine_fn qcow_co_get_block_status(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file)
+static int coroutine_fn qcow_co_block_status(BlockDriverState *bs,
+ bool want_zero,
+ int64_t offset, int64_t bytes,
+ int64_t *pnum, int64_t *map,
+ BlockDriverState **file)
{
BDRVQcowState *s = bs->opaque;
- int index_in_cluster, n;
+ int index_in_cluster, ret;
+ int64_t n;
uint64_t cluster_offset;
qemu_co_mutex_lock(&s->lock);
- cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
+ ret = get_cluster_offset(bs, offset, 0, 0, 0, 0, &cluster_offset);
qemu_co_mutex_unlock(&s->lock);
- index_in_cluster = sector_num & (s->cluster_sectors - 1);
- n = s->cluster_sectors - index_in_cluster;
- if (n > nb_sectors)
- n = nb_sectors;
+ if (ret < 0) {
+ return ret;
+ }
+ index_in_cluster = offset & (s->cluster_size - 1);
+ n = s->cluster_size - index_in_cluster;
+ if (n > bytes) {
+ n = bytes;
+ }
*pnum = n;
if (!cluster_offset) {
return 0;
}
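+    /* Compressed or encrypted clusters cannot be mapped to a raw offset in
+     * bs->file, so report data without BDRV_BLOCK_OFFSET_VALID. */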
- if ((cluster_offset & QCOW_OFLAG_COMPRESSED) || s->cipher) {
+ if ((cluster_offset & QCOW_OFLAG_COMPRESSED) || s->crypto) {
return BDRV_BLOCK_DATA;
}
- cluster_offset |= (index_in_cluster << BDRV_SECTOR_BITS);
+ *map = cluster_offset | index_in_cluster;
*file = bs->file->bs;
- return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | cluster_offset;
+ return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
}
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
if (s->cluster_cache_offset != coffset) {
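+        /* For compressed clusters the payload size in bytes is encoded in
+         * the cluster_bits bits just below the QCOW_OFLAG_COMPRESSED flag. */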
csize = cluster_offset >> (63 - s->cluster_bits);
csize &= (s->cluster_size - 1);
+ BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_pread(bs->file, coffset, s->cluster_data, csize);
if (ret != csize)
return -1;
QEMUIOVector hd_qiov;
uint8_t *buf;
void *orig_buf;
- Error *err = NULL;
if (qiov->niov > 1) {
buf = orig_buf = qemu_try_blockalign(bs, qiov->size);
while (nb_sectors != 0) {
/* prepare next request */
- cluster_offset = get_cluster_offset(bs, sector_num << 9,
- 0, 0, 0, 0);
+ ret = get_cluster_offset(bs, sector_num << 9,
+ 0, 0, 0, 0, &cluster_offset);
+ if (ret < 0) {
+ break;
+ }
index_in_cluster = sector_num & (s->cluster_sectors - 1);
n = s->cluster_sectors - index_in_cluster;
if (n > nb_sectors) {
hd_iov.iov_len = n * 512;
qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
qemu_co_mutex_unlock(&s->lock);
+ /* qcow2 emits this on bs->file instead of bs->backing */
+ BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
ret = bdrv_co_readv(bs->backing, sector_num, n, &hd_qiov);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
- goto fail;
+ break;
}
} else {
/* Note: in this case, no need to wait */
} else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
/* add AIO support for compressed blocks ? */
if (decompress_cluster(bs, cluster_offset) < 0) {
- goto fail;
+ ret = -EIO;
+ break;
}
memcpy(buf,
s->cluster_cache + index_in_cluster * 512, 512 * n);
} else {
if ((cluster_offset & 511) != 0) {
- goto fail;
+ ret = -EIO;
+ break;
}
hd_iov.iov_base = (void *)buf;
hd_iov.iov_len = n * 512;
qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
qemu_co_mutex_unlock(&s->lock);
+ BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
ret = bdrv_co_readv(bs->file,
(cluster_offset >> 9) + index_in_cluster,
n, &hd_qiov);
break;
}
if (bs->encrypted) {
- assert(s->cipher);
- if (encrypt_sectors(s, sector_num, buf, buf,
- n, false, &err) < 0) {
- goto fail;
+ assert(s->crypto);
+ if (qcrypto_block_decrypt(s->crypto,
+ sector_num * BDRV_SECTOR_SIZE, buf,
+ n * BDRV_SECTOR_SIZE, NULL) < 0) {
+ ret = -EIO;
+ break;
}
}
}
buf += n * 512;
}
-done:
qemu_co_mutex_unlock(&s->lock);
if (qiov->niov > 1) {
}
return ret;
-
-fail:
- error_free(err);
- ret = -EIO;
- goto done;
}
static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov)
+ int nb_sectors, QEMUIOVector *qiov,
+ int flags)
{
BDRVQcowState *s = bs->opaque;
int index_in_cluster;
uint64_t cluster_offset;
- const uint8_t *src_buf;
int ret = 0, n;
- uint8_t *cluster_data = NULL;
struct iovec hd_iov;
QEMUIOVector hd_qiov;
uint8_t *buf;
void *orig_buf;
+ assert(!flags);
s->cluster_cache_offset = -1; /* disable compressed cache */
- if (qiov->niov > 1) {
+ /* We must always copy the iov when encrypting, so we
+ * don't modify the original data buffer during encryption */
+ if (bs->encrypted || qiov->niov > 1) {
buf = orig_buf = qemu_try_blockalign(bs, qiov->size);
if (buf == NULL) {
return -ENOMEM;
if (n > nb_sectors) {
n = nb_sectors;
}
- cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0,
- index_in_cluster,
- index_in_cluster + n);
+ ret = get_cluster_offset(bs, sector_num << 9, 1, 0,
+ index_in_cluster,
+ index_in_cluster + n, &cluster_offset);
+ if (ret < 0) {
+ break;
+ }
if (!cluster_offset || (cluster_offset & 511) != 0) {
ret = -EIO;
break;
}
if (bs->encrypted) {
- Error *err = NULL;
- assert(s->cipher);
- if (!cluster_data) {
- cluster_data = g_malloc0(s->cluster_size);
- }
- if (encrypt_sectors(s, sector_num, cluster_data, buf,
- n, true, &err) < 0) {
- error_free(err);
+ assert(s->crypto);
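+            /* Encrypt in place: buf is our own bounce buffer here (see the
+             * copy above), so the guest's data is left untouched. */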
+ if (qcrypto_block_encrypt(s->crypto, sector_num * BDRV_SECTOR_SIZE,
+ buf, n * BDRV_SECTOR_SIZE, NULL) < 0) {
ret = -EIO;
break;
}
- src_buf = cluster_data;
- } else {
- src_buf = buf;
}
- hd_iov.iov_base = (void *)src_buf;
+ hd_iov.iov_base = (void *)buf;
hd_iov.iov_len = n * 512;
qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
qemu_co_mutex_unlock(&s->lock);
+ BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_co_writev(bs->file,
(cluster_offset >> 9) + index_in_cluster,
n, &hd_qiov);
}
qemu_co_mutex_unlock(&s->lock);
- if (qiov->niov > 1) {
- qemu_vfree(orig_buf);
- }
- g_free(cluster_data);
+ qemu_vfree(orig_buf);
return ret;
}
{
BDRVQcowState *s = bs->opaque;
- qcrypto_cipher_free(s->cipher);
- s->cipher = NULL;
+ qcrypto_block_free(s->crypto);
+ s->crypto = NULL;
g_free(s->l1_table);
qemu_vfree(s->l2_cache);
g_free(s->cluster_cache);
error_free(s->migration_blocker);
}
-static int qcow_create(const char *filename, QemuOpts *opts, Error **errp)
+static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts,
+ Error **errp)
{
+ BlockdevCreateOptionsQcow *qcow_opts;
int header_size, backing_filename_len, l1_size, shift, i;
QCowHeader header;
uint8_t *tmp;
int64_t total_size = 0;
- char *backing_file = NULL;
- int flags = 0;
- Error *local_err = NULL;
int ret;
+ BlockDriverState *bs;
BlockBackend *qcow_blk;
+ QCryptoBlock *crypto = NULL;
+
+ assert(opts->driver == BLOCKDEV_DRIVER_QCOW);
+ qcow_opts = &opts->u.qcow;
- /* Read out options */
- total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
- BDRV_SECTOR_SIZE);
- backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
- if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) {
- flags |= BLOCK_FLAG_ENCRYPT;
+ /* Sanity checks */
+ total_size = qcow_opts->size;
+ if (total_size == 0) {
+ error_setg(errp, "Image size is too small, cannot be zero length");
+ return -EINVAL;
}
- ret = bdrv_create_file(filename, opts, &local_err);
- if (ret < 0) {
- error_propagate(errp, local_err);
- goto cleanup;
+ if (qcow_opts->has_encrypt &&
+ qcow_opts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_QCOW)
+ {
+ error_setg(errp, "Unsupported encryption format");
+ return -EINVAL;
}
- qcow_blk = blk_new_open(filename, NULL, NULL,
- BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
- &local_err);
- if (qcow_blk == NULL) {
- error_propagate(errp, local_err);
- ret = -EIO;
- goto cleanup;
+ /* Create BlockBackend to write to the image */
+ bs = bdrv_open_blockdev_ref(qcow_opts->file, errp);
+ if (bs == NULL) {
+ return -EIO;
}
+ qcow_blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
+ ret = blk_insert_bs(qcow_blk, bs, errp);
+ if (ret < 0) {
+ goto exit;
+ }
blk_set_allow_write_beyond_eof(qcow_blk, true);
- ret = blk_truncate(qcow_blk, 0, errp);
+ /* Create image format */
+ ret = blk_truncate(qcow_blk, 0, PREALLOC_MODE_OFF, errp);
if (ret < 0) {
goto exit;
}
header.size = cpu_to_be64(total_size);
header_size = sizeof(header);
backing_filename_len = 0;
- if (backing_file) {
- if (strcmp(backing_file, "fat:")) {
+ if (qcow_opts->has_backing_file) {
+ if (strcmp(qcow_opts->backing_file, "fat:")) {
header.backing_file_offset = cpu_to_be64(header_size);
- backing_filename_len = strlen(backing_file);
+ backing_filename_len = strlen(qcow_opts->backing_file);
header.backing_file_size = cpu_to_be32(backing_filename_len);
header_size += backing_filename_len;
} else {
/* special backing file for vvfat */
- backing_file = NULL;
+ qcow_opts->has_backing_file = false;
}
header.cluster_bits = 9; /* 512 byte cluster to avoid copying
unmodified sectors */
l1_size = (total_size + (1LL << shift) - 1) >> shift;
header.l1_table_offset = cpu_to_be64(header_size);
- if (flags & BLOCK_FLAG_ENCRYPT) {
+
+ if (qcow_opts->has_encrypt) {
header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
+
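+        /* The legacy 'qcow' crypto format writes no encryption header into
+         * the image, which is presumably why no init/write callbacks are
+         * needed here. */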
+ crypto = qcrypto_block_create(qcow_opts->encrypt, "encrypt.",
+ NULL, NULL, NULL, errp);
+ if (!crypto) {
+ ret = -EINVAL;
+ goto exit;
+ }
} else {
header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
}
goto exit;
}
- if (backing_file) {
+ if (qcow_opts->has_backing_file) {
ret = blk_pwrite(qcow_blk, sizeof(header),
- backing_file, backing_filename_len, 0);
+ qcow_opts->backing_file, backing_filename_len, 0);
if (ret != backing_filename_len) {
goto exit;
}
ret = 0;
exit:
blk_unref(qcow_blk);
-cleanup:
- g_free(backing_file);
+ qcrypto_block_free(crypto);
+ return ret;
+}
+
+static int coroutine_fn qcow_co_create_opts(const char *filename,
+ QemuOpts *opts, Error **errp)
+{
+ BlockdevCreateOptions *create_options = NULL;
+ BlockDriverState *bs = NULL;
+ QDict *qdict;
+ QObject *qobj;
+ Visitor *v;
+ const char *val;
+ Error *local_err = NULL;
+ int ret;
+
+ static const QDictRenames opt_renames[] = {
+ { BLOCK_OPT_BACKING_FILE, "backing-file" },
+ { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT },
+ { NULL, NULL },
+ };
+
+ /* Parse options and convert legacy syntax */
+ qdict = qemu_opts_to_qdict_filtered(opts, NULL, &qcow_create_opts, true);
+
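+    /* Legacy "encrypt=on" selected AES; translate it (and the equivalent
+     * "encrypt-format=aes") to the QAPI crypto format name "qcow". */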
+ val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT);
+ if (val && !strcmp(val, "on")) {
+ qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow");
+ } else if (val && !strcmp(val, "off")) {
+ qdict_del(qdict, BLOCK_OPT_ENCRYPT);
+ }
+
+ val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT);
+ if (val && !strcmp(val, "aes")) {
+ qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow");
+ }
+
+ if (!qdict_rename_keys(qdict, opt_renames, errp)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Create and open the file (protocol layer) */
+ ret = bdrv_create_file(filename, opts, &local_err);
+ if (ret < 0) {
+ error_propagate(errp, local_err);
+ goto fail;
+ }
+
+ bs = bdrv_open(filename, NULL, NULL,
+ BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
+ if (bs == NULL) {
+ ret = -EIO;
+ goto fail;
+ }
+
+ /* Now get the QAPI type BlockdevCreateOptions */
+ qdict_put_str(qdict, "driver", "qcow");
+ qdict_put_str(qdict, "file", bs->node_name);
+
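+    /* Crumple the flat QDict into a nested one and run it through the
+     * keyval input visitor to obtain a BlockdevCreateOptions object. */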
+ qobj = qdict_crumple_for_keyval_qiv(qdict, errp);
+ if (!qobj) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ v = qobject_input_visitor_new_keyval(qobj);
+ qobject_unref(qobj);
+ visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err);
+ visit_free(v);
+
+ if (local_err) {
+ error_propagate(errp, local_err);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Silently round up size */
+ assert(create_options->driver == BLOCKDEV_DRIVER_QCOW);
+ create_options->u.qcow.size =
+ ROUND_UP(create_options->u.qcow.size, BDRV_SECTOR_SIZE);
+
+ /* Create the qcow image (format layer) */
+ ret = qcow_co_create(create_options, errp);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ ret = 0;
+fail:
+ qobject_unref(qdict);
+ bdrv_unref(bs);
+ qapi_free_BlockdevCreateOptions(create_options);
return ret;
}
if (bdrv_pwrite_sync(bs->file, s->l1_table_offset, s->l1_table,
l1_length) < 0)
return -1;
- ret = bdrv_truncate(bs->file, s->l1_table_offset + l1_length, NULL);
+ ret = bdrv_truncate(bs->file, s->l1_table_offset + l1_length,
+ PREALLOC_MODE_OFF, NULL);
if (ret < 0)
return ret;
if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
/* could not compress: write normal cluster */
ret = qcow_co_writev(bs, offset >> BDRV_SECTOR_BITS,
- bytes >> BDRV_SECTOR_BITS, qiov);
+ bytes >> BDRV_SECTOR_BITS, qiov, 0);
if (ret < 0) {
goto fail;
}
goto success;
}
qemu_co_mutex_lock(&s->lock);
- cluster_offset = get_cluster_offset(bs, offset, 2, out_len, 0, 0);
+ ret = get_cluster_offset(bs, offset, 2, out_len, 0, 0, &cluster_offset);
qemu_co_mutex_unlock(&s->lock);
+ if (ret < 0) {
+ goto fail;
+ }
if (cluster_offset == 0) {
ret = -EIO;
goto fail;
.iov_len = out_len,
};
qemu_iovec_init_external(&hd_qiov, &iov, 1);
+ BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0);
if (ret < 0) {
goto fail;
{
.name = BLOCK_OPT_ENCRYPT,
.type = QEMU_OPT_BOOL,
- .help = "Encrypt the image",
- .def_value_str = "off"
+ .help = "Encrypt the image with format 'aes'. (Deprecated "
+ "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)",
+ },
+ {
+ .name = BLOCK_OPT_ENCRYPT_FORMAT,
+ .type = QEMU_OPT_STRING,
+ .help = "Encrypt the image, format choices: 'aes'",
},
+ BLOCK_CRYPTO_OPT_DEF_QCOW_KEY_SECRET("encrypt."),
{ /* end of list */ }
}
};
.bdrv_close = qcow_close,
.bdrv_child_perm = bdrv_format_default_perms,
.bdrv_reopen_prepare = qcow_reopen_prepare,
- .bdrv_create = qcow_create,
+ .bdrv_co_create = qcow_co_create,
+ .bdrv_co_create_opts = qcow_co_create_opts,
.bdrv_has_zero_init = bdrv_has_zero_init_1,
.supports_backing = true,
.bdrv_co_readv = qcow_co_readv,
.bdrv_co_writev = qcow_co_writev,
- .bdrv_co_get_block_status = qcow_co_get_block_status,
+ .bdrv_co_block_status = qcow_co_block_status,
- .bdrv_set_key = qcow_set_key,
.bdrv_make_empty = qcow_make_empty,
.bdrv_co_pwritev_compressed = qcow_co_pwritev_compressed,
.bdrv_get_info = qcow_get_info,