#ifdef __linux__
#include <sys/ioctl.h>
#include <sys/param.h>
+#include <sys/syscall.h>
#include <linux/cdrom.h>
#include <linux/fd.h>
#include <linux/fs.h>
bool page_cache_inconsistent:1;
bool has_fallocate;
bool needs_alignment;
+ bool check_cache_dropped;
PRManager *pr_mgr;
} BDRVRawState;
typedef struct BDRVRawReopenState {
int fd;
int open_flags;
+ bool check_cache_dropped;
} BDRVRawReopenState;
static int fd_open(BlockDriverState *bs);
#define aio_ioctl_cmd aio_nbytes /* for QEMU_AIO_IOCTL */
off_t aio_offset;
int aio_type;
+ union {
+ struct {
+ int aio_fd2;
+ off_t aio_offset2;
+ };
+ struct {
+ PreallocMode prealloc;
+ Error **errp;
+ };
+ };
} RawPosixAIOData;
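/*
 * Illustrative sketch, not part of the patch: the anonymous union above
 * overlays the copy-range parameters with the truncate parameters, so a
 * single RawPosixAIOData carries whichever set matches aio_type. The two
 * initializations below mirror paio_submit_co_full() and
 * raw_regular_truncate() further down; the fd/offset/size variables are
 * placeholders.
 *
 *     RawPosixAIOData copy = {
 *         .aio_type    = QEMU_AIO_COPY_RANGE,
 *         .aio_fildes  = src_fd,
 *         .aio_offset  = src_offset,
 *         .aio_fd2     = dst_fd,       // second fd, copy range only
 *         .aio_offset2 = dst_offset,
 *         .aio_nbytes  = bytes,
 *     };
 *
 *     RawPosixAIOData trunc_req = {
 *         .aio_type   = QEMU_AIO_TRUNCATE,
 *         .aio_fildes = fd,
 *         .aio_offset = new_size,
 *         .prealloc   = PREALLOC_MODE_FALLOC, // same storage as aio_fd2
 *         .errp       = errp,
 *     };
 */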
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
.type = QEMU_OPT_STRING,
.help = "id of persistent reservation manager object (default: none)",
},
+ {
+ .name = "x-check-cache-dropped",
+ .type = QEMU_OPT_BOOL,
+ .help = "check that page cache was dropped on live migration (default: off)"
+ },
{ /* end of list */ }
},
};
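/*
 * Illustrative usage of the new option (a sketch; the exact -blockdev
 * spelling depends on the QEMU version and the rest of the configuration):
 *
 *     -blockdev driver=file,node-name=disk0,filename=test.img,x-check-cache-dropped=on
 *
 * The check only matters when the image is opened without cache.direct=on:
 * with O_DIRECT there is no host page cache to verify in the first place,
 * and raw_co_invalidate_cache() below returns early in that case.
 */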
static int raw_open_common(BlockDriverState *bs, QDict *options,
- int bdrv_flags, int open_flags, Error **errp)
+ int bdrv_flags, int open_flags,
+ bool device, Error **errp)
{
BDRVRawState *s = bs->opaque;
QemuOpts *opts;
}
}
+ s->check_cache_dropped = qemu_opt_get_bool(opts, "x-check-cache-dropped",
+ false);
+
s->open_flags = open_flags;
raw_parse_flags(bdrv_flags, &s->open_flags);
#ifdef CONFIG_LINUX_AIO
/* Currently Linux does AIO only for files opened with O_DIRECT */
- if (s->use_linux_aio && !(s->open_flags & O_DIRECT)) {
- error_setg(errp, "aio=native was specified, but it requires "
- "cache.direct=on, which was not specified.");
- ret = -EINVAL;
- goto fail;
+ if (s->use_linux_aio) {
+ if (!(s->open_flags & O_DIRECT)) {
+ error_setg(errp, "aio=native was specified, but it requires "
+ "cache.direct=on, which was not specified.");
+ ret = -EINVAL;
+ goto fail;
+ }
+ if (!aio_setup_linux_aio(bdrv_get_aio_context(bs), errp)) {
+ error_prepend(errp, "Unable to use native AIO: ");
+ goto fail;
+ }
}
#else
if (s->use_linux_aio) {
error_setg_errno(errp, errno, "Could not stat file");
goto fail;
}
- if (S_ISREG(st.st_mode)) {
- s->discard_zeroes = true;
- s->has_fallocate = true;
+
+ if (!device) {
+ if (S_ISBLK(st.st_mode)) {
+ warn_report("Opening a block device as a file using the '%s' "
+ "driver is deprecated", bs->drv->format_name);
+ } else if (S_ISCHR(st.st_mode)) {
+ warn_report("Opening a character device as a file using the '%s' "
+ "driver is deprecated", bs->drv->format_name);
+ } else if (!S_ISREG(st.st_mode)) {
+ error_setg(errp, "A regular file was expected by the '%s' driver, "
+ "but something else was given", bs->drv->format_name);
+ ret = -EINVAL;
+ goto fail;
+ } else {
+ s->discard_zeroes = true;
+ s->has_fallocate = true;
+ }
+ } else {
+ if (!(S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
+ error_setg(errp, "'%s' driver expects either "
+ "a character or block device", bs->drv->format_name);
+ ret = -EINVAL;
+ goto fail;
+ }
}
+
if (S_ISBLK(st.st_mode)) {
#ifdef BLKDISCARDZEROES
unsigned int arg;
}
#endif
- bs->supported_zero_flags = s->discard_zeroes ? BDRV_REQ_MAY_UNMAP : 0;
+ bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP;
ret = 0;
fail:
if (filename && (bdrv_flags & BDRV_O_TEMPORARY)) {
BDRVRawState *s = bs->opaque;
s->type = FTYPE_FILE;
- return raw_open_common(bs, options, flags, 0, errp);
+ return raw_open_common(bs, options, flags, 0, false, errp);
}
typedef enum {
* file; if @unlock == true, also unlock the unneeded bytes.
* @shared_perm_lock_bits is the mask of all permissions that are NOT shared.
*/
-static int raw_apply_lock_bytes(BDRVRawState *s,
+static int raw_apply_lock_bytes(int fd,
uint64_t perm_lock_bits,
uint64_t shared_perm_lock_bits,
bool unlock, Error **errp)
PERM_FOREACH(i) {
int off = RAW_LOCK_PERM_BASE + i;
if (perm_lock_bits & (1ULL << i)) {
- ret = qemu_lock_fd(s->lock_fd, off, 1, false);
+ ret = qemu_lock_fd(fd, off, 1, false);
if (ret) {
error_setg(errp, "Failed to lock byte %d", off);
return ret;
}
} else if (unlock) {
- ret = qemu_unlock_fd(s->lock_fd, off, 1);
+ ret = qemu_unlock_fd(fd, off, 1);
if (ret) {
error_setg(errp, "Failed to unlock byte %d", off);
return ret;
PERM_FOREACH(i) {
int off = RAW_LOCK_SHARED_BASE + i;
if (shared_perm_lock_bits & (1ULL << i)) {
- ret = qemu_lock_fd(s->lock_fd, off, 1, false);
+ ret = qemu_lock_fd(fd, off, 1, false);
if (ret) {
error_setg(errp, "Failed to lock byte %d", off);
return ret;
}
} else if (unlock) {
- ret = qemu_unlock_fd(s->lock_fd, off, 1);
+ ret = qemu_unlock_fd(fd, off, 1);
if (ret) {
error_setg(errp, "Failed to unlock byte %d", off);
return ret;
}
/* Check "unshared" bytes implied by @perm and ~@shared_perm in the file. */
-static int raw_check_lock_bytes(BDRVRawState *s,
- uint64_t perm, uint64_t shared_perm,
+static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
Error **errp)
{
int ret;
int off = RAW_LOCK_SHARED_BASE + i;
uint64_t p = 1ULL << i;
if (perm & p) {
- ret = qemu_lock_fd_test(s->lock_fd, off, 1, true);
+ ret = qemu_lock_fd_test(fd, off, 1, true);
if (ret) {
char *perm_name = bdrv_perm_names(p);
error_setg(errp,
"Failed to get \"%s\" lock",
perm_name);
g_free(perm_name);
- error_append_hint(errp,
- "Is another process using the image?\n");
return ret;
}
}
int off = RAW_LOCK_PERM_BASE + i;
uint64_t p = 1ULL << i;
if (!(shared_perm & p)) {
- ret = qemu_lock_fd_test(s->lock_fd, off, 1, true);
+ ret = qemu_lock_fd_test(fd, off, 1, true);
if (ret) {
char *perm_name = bdrv_perm_names(p);
error_setg(errp,
"Failed to get shared \"%s\" lock",
perm_name);
g_free(perm_name);
- error_append_hint(errp,
- "Is another process using the image?\n");
return ret;
}
}
switch (op) {
case RAW_PL_PREPARE:
- ret = raw_apply_lock_bytes(s, s->perm | new_perm,
+ ret = raw_apply_lock_bytes(s->lock_fd, s->perm | new_perm,
~s->shared_perm | ~new_shared,
false, errp);
if (!ret) {
- ret = raw_check_lock_bytes(s, new_perm, new_shared, errp);
+ ret = raw_check_lock_bytes(s->lock_fd, new_perm, new_shared, errp);
if (!ret) {
return 0;
}
+ error_append_hint(errp,
+ "Is another process using the image [%s]?\n",
+ bs->filename);
}
op = RAW_PL_ABORT;
/* fall through to unlock bytes. */
case RAW_PL_ABORT:
- raw_apply_lock_bytes(s, s->perm, ~s->shared_perm, true, &local_err);
+ raw_apply_lock_bytes(s->lock_fd, s->perm, ~s->shared_perm,
+ true, &local_err);
if (local_err) {
/* Theoretically the above call only unlocks bytes and it cannot
* fail. Something weird happened, report it.
}
break;
case RAW_PL_COMMIT:
- raw_apply_lock_bytes(s, new_perm, ~new_shared, true, &local_err);
+ raw_apply_lock_bytes(s->lock_fd, new_perm, ~new_shared,
+ true, &local_err);
if (local_err) {
/* Theoretically the above call only unlocks bytes and it cannot
* fail. Something weird happened, report it.
{
BDRVRawState *s;
BDRVRawReopenState *rs;
+ QemuOpts *opts;
int ret = 0;
Error *local_err = NULL;
state->opaque = g_new0(BDRVRawReopenState, 1);
rs = state->opaque;
+ rs->fd = -1;
+
+ /* Handle options changes */
+ opts = qemu_opts_create(&raw_runtime_opts, NULL, 0, &error_abort);
+ qemu_opts_absorb_qdict(opts, state->options, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rs->check_cache_dropped =
+ qemu_opt_get_bool_del(opts, "x-check-cache-dropped", false);
+
+ /* This driver's reopen function doesn't currently allow changing
+ * other options, so put them back into the original QDict, where
+ * bdrv_reopen_prepare() will detect any changes and complain. */
+ qemu_opts_to_qdict(opts, state->options);
if (s->type == FTYPE_CD) {
rs->open_flags |= O_NONBLOCK;
raw_parse_flags(state->flags, &rs->open_flags);
- rs->fd = -1;
-
int fcntl_flags = O_APPEND | O_NONBLOCK;
#ifdef O_NOATIME
fcntl_flags |= O_NOATIME;
}
}
+out:
+ qemu_opts_del(opts);
return ret;
}
BDRVRawReopenState *rs = state->opaque;
BDRVRawState *s = state->bs->opaque;
+ s->check_cache_dropped = rs->check_cache_dropped;
s->open_flags = rs->open_flags;
qemu_close(s->fd);
return -ENOTSUP;
}
+static ssize_t handle_aiocb_write_zeroes_unmap(RawPosixAIOData *aiocb)
+{
+ BDRVRawState *s G_GNUC_UNUSED = aiocb->bs->opaque;
+ int ret;
+
+ /* First try to write zeros and unmap at the same time */
+
+#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
+ ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ aiocb->aio_offset, aiocb->aio_nbytes);
+ if (ret != -ENOTSUP) {
+ return ret;
+ }
+#endif
+
+#ifdef CONFIG_XFS
+ if (s->is_xfs) {
+ /* xfs_discard() guarantees that the discarded area reads as all-zero
+ * afterwards, so we can use it here. */
+ return xfs_discard(s, aiocb->aio_offset, aiocb->aio_nbytes);
+ }
+#endif
+
+ /* If we couldn't unmap while also guaranteeing that the area reads as
+ * all-zero afterwards, fall back to writing zeroes without unmapping */
+ ret = handle_aiocb_write_zeroes(aiocb);
+ return ret;
+}
+
+#ifndef HAVE_COPY_FILE_RANGE
+static off_t copy_file_range(int in_fd, off_t *in_off, int out_fd,
+ off_t *out_off, size_t len, unsigned int flags)
+{
+#ifdef __NR_copy_file_range
+ return syscall(__NR_copy_file_range, in_fd, in_off, out_fd,
+ out_off, len, flags);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+#endif
+
+static ssize_t handle_aiocb_copy_range(RawPosixAIOData *aiocb)
+{
+ uint64_t bytes = aiocb->aio_nbytes;
+ off_t in_off = aiocb->aio_offset;
+ off_t out_off = aiocb->aio_offset2;
+
+ while (bytes) {
+ ssize_t ret = copy_file_range(aiocb->aio_fildes, &in_off,
+ aiocb->aio_fd2, &out_off,
+ bytes, 0);
+ trace_file_copy_file_range(aiocb->bs, aiocb->aio_fildes, in_off,
+ aiocb->aio_fd2, out_off, bytes, 0, ret);
+ if (ret == 0) {
+ /* No progress (e.g. when beyond EOF), let the caller fall back to
+ * buffer I/O. */
+ return -ENOSPC;
+ }
+ if (ret < 0) {
+ switch (errno) {
+ case ENOSYS:
+ return -ENOTSUP;
+ case EINTR:
+ continue;
+ default:
+ return -errno;
+ }
+ }
+ bytes -= ret;
+ }
+ return 0;
+}
+
static ssize_t handle_aiocb_discard(RawPosixAIOData *aiocb)
{
int ret = -EOPNOTSUPP;
return ret;
}
+static int handle_aiocb_truncate(RawPosixAIOData *aiocb)
+{
+ int result = 0;
+ int64_t current_length = 0;
+ char *buf = NULL;
+ struct stat st;
+ int fd = aiocb->aio_fildes;
+ int64_t offset = aiocb->aio_offset;
+ Error **errp = aiocb->errp;
+
+ if (fstat(fd, &st) < 0) {
+ result = -errno;
+ error_setg_errno(errp, -result, "Could not stat file");
+ return result;
+ }
+
+ current_length = st.st_size;
+ if (current_length > offset && aiocb->prealloc != PREALLOC_MODE_OFF) {
+ error_setg(errp, "Cannot use preallocation for shrinking files");
+ return -ENOTSUP;
+ }
+
+ switch (aiocb->prealloc) {
+#ifdef CONFIG_POSIX_FALLOCATE
+ case PREALLOC_MODE_FALLOC:
+ /*
+ * Truncating before posix_fallocate() makes it about twice as slow on
+ * file systems that do not support fallocate(), because posix_fallocate()
+ * then tries to check whether each block is already allocated before
+ * allocating it; so don't do that here.
+ */
+ if (offset != current_length) {
+ result = -posix_fallocate(fd, current_length,
+ offset - current_length);
+ if (result != 0) {
+ /* posix_fallocate() doesn't set errno. */
+ error_setg_errno(errp, -result,
+ "Could not preallocate new data");
+ }
+ } else {
+ result = 0;
+ }
+ goto out;
+#endif
+ case PREALLOC_MODE_FULL:
+ {
+ int64_t num = 0, left = offset - current_length;
+ off_t seek_result;
+
+ /*
+ * Knowing the final size from the beginning could allow the file
+ * system driver to perform fewer allocations and possibly avoid
+ * fragmentation of the file.
+ */
+ if (ftruncate(fd, offset) != 0) {
+ result = -errno;
+ error_setg_errno(errp, -result, "Could not resize file");
+ goto out;
+ }
+
+ buf = g_malloc0(65536);
+
+ seek_result = lseek(fd, current_length, SEEK_SET);
+ if (seek_result < 0) {
+ result = -errno;
+ error_setg_errno(errp, -result,
+ "Failed to seek to the old end of file");
+ goto out;
+ }
+
+ while (left > 0) {
+ num = MIN(left, 65536);
+ result = write(fd, buf, num);
+ if (result < 0) {
+ if (errno == EINTR) {
+ continue;
+ }
+ result = -errno;
+ error_setg_errno(errp, -result,
+ "Could not write zeros for preallocation");
+ goto out;
+ }
+ left -= result;
+ }
+ if (result >= 0) {
+ result = fsync(fd);
+ if (result < 0) {
+ result = -errno;
+ error_setg_errno(errp, -result,
+ "Could not flush file to disk");
+ goto out;
+ }
+ }
+ goto out;
+ }
+ case PREALLOC_MODE_OFF:
+ if (ftruncate(fd, offset) != 0) {
+ result = -errno;
+ error_setg_errno(errp, -result, "Could not resize file");
+ }
+ return result;
+ default:
+ result = -ENOTSUP;
+ error_setg(errp, "Unsupported preallocation mode: %s",
+ PreallocMode_str(aiocb->prealloc));
+ return result;
+ }
+
+out:
+ if (result < 0) {
+ if (ftruncate(fd, current_length) < 0) {
+ error_report("Failed to restore old file length: %s",
+ strerror(errno));
+ }
+ }
+
+ g_free(buf);
+ return result;
+}
+
static int aio_worker(void *arg)
{
RawPosixAIOData *aiocb = arg;
case QEMU_AIO_WRITE_ZEROES:
ret = handle_aiocb_write_zeroes(aiocb);
break;
+ case QEMU_AIO_WRITE_ZEROES | QEMU_AIO_DISCARD:
+ ret = handle_aiocb_write_zeroes_unmap(aiocb);
+ break;
+ case QEMU_AIO_COPY_RANGE:
+ ret = handle_aiocb_copy_range(aiocb);
+ break;
+ case QEMU_AIO_TRUNCATE:
+ ret = handle_aiocb_truncate(aiocb);
+ break;
default:
fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
ret = -EINVAL;
return ret;
}
-static int paio_submit_co(BlockDriverState *bs, int fd,
- int64_t offset, QEMUIOVector *qiov,
- int bytes, int type)
+static int paio_submit_co_full(BlockDriverState *bs, int fd,
+ int64_t offset, int fd2, int64_t offset2,
+ QEMUIOVector *qiov,
+ int bytes, int type)
{
RawPosixAIOData *acb = g_new(RawPosixAIOData, 1);
ThreadPool *pool;
acb->bs = bs;
acb->aio_type = type;
acb->aio_fildes = fd;
+ acb->aio_fd2 = fd2;
+ acb->aio_offset2 = offset2;
acb->aio_nbytes = bytes;
acb->aio_offset = offset;
assert(qiov->size == bytes);
}
- trace_paio_submit_co(offset, bytes, type);
+ trace_file_paio_submit_co(offset, bytes, type);
pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
return thread_pool_submit_co(pool, aio_worker, acb);
}
-static BlockAIOCB *paio_submit(BlockDriverState *bs, int fd,
- int64_t offset, QEMUIOVector *qiov, int bytes,
- BlockCompletionFunc *cb, void *opaque, int type)
+static inline int paio_submit_co(BlockDriverState *bs, int fd,
+ int64_t offset, QEMUIOVector *qiov,
+ int bytes, int type)
{
- RawPosixAIOData *acb = g_new(RawPosixAIOData, 1);
- ThreadPool *pool;
-
- acb->bs = bs;
- acb->aio_type = type;
- acb->aio_fildes = fd;
-
- acb->aio_nbytes = bytes;
- acb->aio_offset = offset;
-
- if (qiov) {
- acb->aio_iov = qiov->iov;
- acb->aio_niov = qiov->niov;
- assert(qiov->size == acb->aio_nbytes);
- }
-
- trace_paio_submit(acb, opaque, offset, bytes, type);
- pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
- return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque);
+ return paio_submit_co_full(bs, fd, offset, -1, 0, qiov, bytes, type);
}
static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
#endif
}
-static BlockAIOCB *raw_aio_flush(BlockDriverState *bs,
- BlockCompletionFunc *cb, void *opaque)
+static int raw_co_flush_to_disk(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
+ int ret;
- if (fd_open(bs) < 0)
- return NULL;
+ ret = fd_open(bs);
+ if (ret < 0) {
+ return ret;
+ }
- return paio_submit(bs, s->fd, 0, NULL, 0, cb, opaque, QEMU_AIO_FLUSH);
+ return paio_submit_co(bs, s->fd, 0, NULL, 0, QEMU_AIO_FLUSH);
+}
+
+static void raw_aio_attach_aio_context(BlockDriverState *bs,
+ AioContext *new_context)
+{
+#ifdef CONFIG_LINUX_AIO
+ BDRVRawState *s = bs->opaque;
+ if (s->use_linux_aio) {
+ Error *local_err = NULL;
+ if (!aio_setup_linux_aio(new_context, &local_err)) {
+ error_reportf_err(local_err, "Unable to use native AIO, "
+ "falling back to thread pool: ");
+ s->use_linux_aio = false;
+ }
+ }
+#endif
}
static void raw_close(BlockDriverState *bs)
*
* Returns: 0 on success, -errno on failure.
*/
-static int raw_regular_truncate(int fd, int64_t offset, PreallocMode prealloc,
- Error **errp)
+static int coroutine_fn
+raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset,
+ PreallocMode prealloc, Error **errp)
{
- int result = 0;
- int64_t current_length = 0;
- char *buf = NULL;
- struct stat st;
-
- if (fstat(fd, &st) < 0) {
- result = -errno;
- error_setg_errno(errp, -result, "Could not stat file");
- return result;
- }
-
- current_length = st.st_size;
- if (current_length > offset && prealloc != PREALLOC_MODE_OFF) {
- error_setg(errp, "Cannot use preallocation for shrinking files");
- return -ENOTSUP;
- }
-
- switch (prealloc) {
-#ifdef CONFIG_POSIX_FALLOCATE
- case PREALLOC_MODE_FALLOC:
- /*
- * Truncating before posix_fallocate() makes it about twice slower on
- * file systems that do not support fallocate(), trying to check if a
- * block is allocated before allocating it, so don't do that here.
- */
- result = -posix_fallocate(fd, current_length, offset - current_length);
- if (result != 0) {
- /* posix_fallocate() doesn't set errno. */
- error_setg_errno(errp, -result,
- "Could not preallocate new data");
- }
- goto out;
-#endif
- case PREALLOC_MODE_FULL:
- {
- int64_t num = 0, left = offset - current_length;
-
- /*
- * Knowing the final size from the beginning could allow the file
- * system driver to do less allocations and possibly avoid
- * fragmentation of the file.
- */
- if (ftruncate(fd, offset) != 0) {
- result = -errno;
- error_setg_errno(errp, -result, "Could not resize file");
- goto out;
- }
-
- buf = g_malloc0(65536);
-
- result = lseek(fd, current_length, SEEK_SET);
- if (result < 0) {
- result = -errno;
- error_setg_errno(errp, -result,
- "Failed to seek to the old end of file");
- goto out;
- }
-
- while (left > 0) {
- num = MIN(left, 65536);
- result = write(fd, buf, num);
- if (result < 0) {
- result = -errno;
- error_setg_errno(errp, -result,
- "Could not write zeros for preallocation");
- goto out;
- }
- left -= result;
- }
- if (result >= 0) {
- result = fsync(fd);
- if (result < 0) {
- result = -errno;
- error_setg_errno(errp, -result,
- "Could not flush file to disk");
- goto out;
- }
- }
- goto out;
- }
- case PREALLOC_MODE_OFF:
- if (ftruncate(fd, offset) != 0) {
- result = -errno;
- error_setg_errno(errp, -result, "Could not resize file");
- }
- return result;
- default:
- result = -ENOTSUP;
- error_setg(errp, "Unsupported preallocation mode: %s",
- PreallocMode_str(prealloc));
- return result;
- }
+ RawPosixAIOData *acb = g_new(RawPosixAIOData, 1);
+ ThreadPool *pool;
-out:
- if (result < 0) {
- if (ftruncate(fd, current_length) < 0) {
- error_report("Failed to restore old file length: %s",
- strerror(errno));
- }
- }
+ *acb = (RawPosixAIOData) {
+ .bs = bs,
+ .aio_fildes = fd,
+ .aio_type = QEMU_AIO_TRUNCATE,
+ .aio_offset = offset,
+ .prealloc = prealloc,
+ .errp = errp,
+ };
- g_free(buf);
- return result;
+ /* @bs can be NULL, bdrv_get_aio_context() returns the main context then */
+ pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
+ return thread_pool_submit_co(pool, aio_worker, acb);
}
-static int raw_truncate(BlockDriverState *bs, int64_t offset,
- PreallocMode prealloc, Error **errp)
+static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
+ PreallocMode prealloc, Error **errp)
{
BDRVRawState *s = bs->opaque;
struct stat st;
}
if (S_ISREG(st.st_mode)) {
- return raw_regular_truncate(s->fd, offset, prealloc, errp);
+ return raw_regular_truncate(bs, s->fd, offset, prealloc, errp);
}
if (prealloc != PREALLOC_MODE_OFF) {
return (int64_t)st.st_blocks * 512;
}
-static int raw_create(const char *filename, QemuOpts *opts, Error **errp)
+static int coroutine_fn
+raw_co_create(BlockdevCreateOptions *options, Error **errp)
{
+ BlockdevCreateOptionsFile *file_opts;
+ Error *local_err = NULL;
int fd;
+ uint64_t perm, shared;
int result = 0;
- int64_t total_size = 0;
- bool nocow = false;
- PreallocMode prealloc;
- char *buf = NULL;
- Error *local_err = NULL;
- strstart(filename, "file:", &filename);
+ /* Validate options and set default values */
+ assert(options->driver == BLOCKDEV_DRIVER_FILE);
+ file_opts = &options->u.file;
- /* Read out options */
- total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
- BDRV_SECTOR_SIZE);
- nocow = qemu_opt_get_bool(opts, BLOCK_OPT_NOCOW, false);
- buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
- prealloc = qapi_enum_parse(&PreallocMode_lookup, buf,
- PREALLOC_MODE_OFF, &local_err);
- g_free(buf);
- if (local_err) {
- error_propagate(errp, local_err);
- result = -EINVAL;
- goto out;
+ if (!file_opts->has_nocow) {
+ file_opts->nocow = false;
+ }
+ if (!file_opts->has_preallocation) {
+ file_opts->preallocation = PREALLOC_MODE_OFF;
}
- fd = qemu_open(filename, O_RDWR | O_CREAT | O_TRUNC | O_BINARY,
- 0644);
+ /* Create file */
+ fd = qemu_open(file_opts->filename, O_RDWR | O_CREAT | O_BINARY, 0644);
if (fd < 0) {
result = -errno;
error_setg_errno(errp, -result, "Could not create file");
goto out;
}
- if (nocow) {
+ /* Take permissions: We want to discard everything, so we need
+ * BLK_PERM_WRITE; and truncation to the desired size requires
+ * BLK_PERM_RESIZE.
+ * On the other hand, we cannot share the RESIZE permission
+ * because we promise that after this function, the file has the
+ * size given in the options. If someone else were to resize it
+ * concurrently, we could not guarantee that.
+ * Note that after this function, we can no longer guarantee that
+ * the file is not touched by a third party, so it may be resized
+ * then. */
+ perm = BLK_PERM_WRITE | BLK_PERM_RESIZE;
+ shared = BLK_PERM_ALL & ~BLK_PERM_RESIZE;
+
+ /* Step one: Take locks */
+ result = raw_apply_lock_bytes(fd, perm, ~shared, false, errp);
+ if (result < 0) {
+ goto out_close;
+ }
+
+ /* Step two: Check that nobody else has taken conflicting locks */
+ result = raw_check_lock_bytes(fd, perm, shared, errp);
+ if (result < 0) {
+ error_append_hint(errp,
+ "Is another process using the image [%s]?\n",
+ file_opts->filename);
+ goto out_unlock;
+ }
+
+ /* Clear the file by truncating it to 0 */
+ result = raw_regular_truncate(NULL, fd, 0, PREALLOC_MODE_OFF, errp);
+ if (result < 0) {
+ goto out_unlock;
+ }
+
+ if (file_opts->nocow) {
#ifdef __linux__
/* Set NOCOW flag to solve performance issue on fs like btrfs.
* This is an optimisation. The FS_IOC_SETFLAGS ioctl return value
#endif
}
- result = raw_regular_truncate(fd, total_size, prealloc, errp);
+ /* Resize and potentially preallocate the file to the desired
+ * final size */
+ result = raw_regular_truncate(NULL, fd, file_opts->size,
+ file_opts->preallocation, errp);
if (result < 0) {
- goto out_close;
+ goto out_unlock;
+ }
+
+out_unlock:
+ raw_apply_lock_bytes(fd, 0, 0, true, &local_err);
+ if (local_err) {
+ /* The above call should not fail, and if it does, that does
+ * not mean the whole creation operation has failed. So
+ * report it to the user for their convenience, but do not report
+ * it to the caller. */
+ error_report_err(local_err);
}
out_close:
return result;
}
+static int coroutine_fn raw_co_create_opts(const char *filename, QemuOpts *opts,
+ Error **errp)
+{
+ BlockdevCreateOptions options;
+ int64_t total_size = 0;
+ bool nocow = false;
+ PreallocMode prealloc;
+ char *buf = NULL;
+ Error *local_err = NULL;
+
+ /* Skip file: protocol prefix */
+ strstart(filename, "file:", &filename);
+
+ /* Read out options */
+ total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
+ BDRV_SECTOR_SIZE);
+ nocow = qemu_opt_get_bool(opts, BLOCK_OPT_NOCOW, false);
+ buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
+ prealloc = qapi_enum_parse(&PreallocMode_lookup, buf,
+ PREALLOC_MODE_OFF, &local_err);
+ g_free(buf);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return -EINVAL;
+ }
+
+ options = (BlockdevCreateOptions) {
+ .driver = BLOCKDEV_DRIVER_FILE,
+ .u.file = {
+ .filename = (char *) filename,
+ .size = total_size,
+ .has_preallocation = true,
+ .preallocation = prealloc,
+ .has_nocow = true,
+ .nocow = nocow,
+ },
+ };
+ return raw_co_create(&options, errp);
+}
+
/*
* Find allocation range in @bs around offset @start.
* May change underlying file descriptor's file offset.
if (offs < 0) {
return -errno; /* D3 or D4 */
}
- assert(offs >= start);
+
+ if (offs < start) {
+ /* This is not a valid return from lseek(). We are safe to just return
+ * -EIO in this case, and we'll treat it like D4. */
+ return -EIO;
+ }
if (offs > start) {
/* D2: in hole, next data at offs */
if (offs < 0) {
return -errno; /* D1 and (H3 or H4) */
}
- assert(offs >= start);
+
+ if (offs < start) {
+ /* This is not a valid return from lseek(). We are safe to just return
+ * -EIO in this case, and we'll treat it like H4. */
+ return -EIO;
+ }
if (offs > start) {
/*
}
/*
- * Returns the allocation status of the specified sectors.
+ * Returns the allocation status of the specified offset.
*
- * If 'sector_num' is beyond the end of the disk image the return value is 0
- * and 'pnum' is set to 0.
+ * The block layer guarantees 'offset' and 'bytes' are within bounds.
*
- * 'pnum' is set to the number of sectors (including and immediately following
- * the specified sector) that are known to be in the same
+ * 'pnum' is set to the number of bytes (including and immediately following
+ * the specified offset) that are known to be in the same
* allocated/unallocated state.
*
- * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
- * beyond the end of the disk image it will be clamped.
+ * 'bytes' is the max value 'pnum' should be set to.
*/
-static int64_t coroutine_fn raw_co_get_block_status(BlockDriverState *bs,
- int64_t sector_num,
- int nb_sectors, int *pnum,
- BlockDriverState **file)
-{
- off_t start, data = 0, hole = 0;
- int64_t total_size;
+static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
+ bool want_zero,
+ int64_t offset,
+ int64_t bytes, int64_t *pnum,
+ int64_t *map,
+ BlockDriverState **file)
+{
+ off_t data = 0, hole = 0;
int ret;
ret = fd_open(bs);
return ret;
}
- start = sector_num * BDRV_SECTOR_SIZE;
- total_size = bdrv_getlength(bs);
- if (total_size < 0) {
- return total_size;
- } else if (start >= total_size) {
- *pnum = 0;
- return 0;
- } else if (start + nb_sectors * BDRV_SECTOR_SIZE > total_size) {
- nb_sectors = DIV_ROUND_UP(total_size - start, BDRV_SECTOR_SIZE);
+ if (!want_zero) {
+ *pnum = bytes;
+ *map = offset;
+ *file = bs;
+ return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
}
- ret = find_allocation(bs, start, &data, &hole);
+ ret = find_allocation(bs, offset, &data, &hole);
if (ret == -ENXIO) {
/* Trailing hole */
- *pnum = nb_sectors;
+ *pnum = bytes;
ret = BDRV_BLOCK_ZERO;
} else if (ret < 0) {
/* No info available, so pretend there are no holes */
- *pnum = nb_sectors;
+ *pnum = bytes;
ret = BDRV_BLOCK_DATA;
- } else if (data == start) {
- /* On a data extent, compute sectors to the end of the extent,
+ } else if (data == offset) {
+ /* On a data extent, compute bytes to the end of the extent,
* possibly including a partial sector at EOF. */
- *pnum = MIN(nb_sectors, DIV_ROUND_UP(hole - start, BDRV_SECTOR_SIZE));
+ *pnum = MIN(bytes, hole - offset);
ret = BDRV_BLOCK_DATA;
} else {
- /* On a hole, compute sectors to the beginning of the next extent. */
- assert(hole == start);
- *pnum = MIN(nb_sectors, (data - start) / BDRV_SECTOR_SIZE);
+ /* On a hole, compute bytes to the beginning of the next extent. */
+ assert(hole == offset);
+ *pnum = MIN(bytes, data - offset);
ret = BDRV_BLOCK_ZERO;
}
+ *map = offset;
*file = bs;
- return ret | BDRV_BLOCK_OFFSET_VALID | start;
+ return ret | BDRV_BLOCK_OFFSET_VALID;
}
-static coroutine_fn BlockAIOCB *raw_aio_pdiscard(BlockDriverState *bs,
- int64_t offset, int bytes,
- BlockCompletionFunc *cb, void *opaque)
+#if defined(__linux__)
+/* Verify that the file is not in the page cache */
+static void check_cache_dropped(BlockDriverState *bs, Error **errp)
{
+ const size_t window_size = 128 * 1024 * 1024;
BDRVRawState *s = bs->opaque;
+ void *window = NULL;
+ size_t length = 0;
+ unsigned char *vec;
+ size_t page_size;
+ off_t offset;
+ off_t end;
+
+ /* mincore(2) page status information requires 1 byte per page */
+ page_size = sysconf(_SC_PAGESIZE);
+ vec = g_malloc(DIV_ROUND_UP(window_size, page_size));
+
+ end = raw_getlength(bs);
+
+ for (offset = 0; offset < end; offset += window_size) {
+ void *new_window;
+ size_t new_length;
+ size_t vec_end;
+ size_t i;
+ int ret;
+
+ /* Unmap previous window if size has changed */
+ new_length = MIN(end - offset, window_size);
+ if (new_length != length) {
+ munmap(window, length);
+ window = NULL;
+ length = 0;
+ }
+
+ new_window = mmap(window, new_length, PROT_NONE, MAP_PRIVATE,
+ s->fd, offset);
+ if (new_window == MAP_FAILED) {
+ error_setg_errno(errp, errno, "mmap failed");
+ break;
+ }
+
+ window = new_window;
+ length = new_length;
+
+ ret = mincore(window, length, vec);
+ if (ret < 0) {
+ error_setg_errno(errp, errno, "mincore failed");
+ break;
+ }
- return paio_submit(bs, s->fd, offset, NULL, bytes,
- cb, opaque, QEMU_AIO_DISCARD);
+ vec_end = DIV_ROUND_UP(length, page_size);
+ for (i = 0; i < vec_end; i++) {
+ if (vec[i] & 0x1) {
+ break;
+ }
+ }
+ if (i < vec_end) {
+ /* Stop after the first resident page; setting errp twice would
+ * trip an assertion in error_setg(). */
+ error_setg(errp, "page cache still in use!");
+ break;
+ }
+ }
+
+ if (window) {
+ munmap(window, length);
+ }
+
+ g_free(vec);
+}
+#endif /* __linux__ */
+
+static void coroutine_fn raw_co_invalidate_cache(BlockDriverState *bs,
+ Error **errp)
+{
+ BDRVRawState *s = bs->opaque;
+ int ret;
+
+ ret = fd_open(bs);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "The file descriptor is not open");
+ return;
+ }
+
+ if (s->open_flags & O_DIRECT) {
+ return; /* No host kernel page cache */
+ }
+
+#if defined(__linux__)
+ /* Write dirty pages back first; POSIX_FADV_DONTNEED below skips dirty pages */
+ ret = bdrv_co_flush(bs);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "flush failed");
+ return;
+ }
+
+ /* Linux does not invalidate pages that are dirty, locked, or mmapped by a
+ * process. These limitations are okay because we just fsynced the file,
+ * we don't use mmap, and the file should not be in use by other processes.
+ */
+ ret = posix_fadvise(s->fd, 0, 0, POSIX_FADV_DONTNEED);
+ if (ret != 0) { /* the return value is a positive errno */
+ error_setg_errno(errp, ret, "fadvise failed");
+ return;
+ }
+
+ if (s->check_cache_dropped) {
+ check_cache_dropped(bs, errp);
+ }
+#else /* __linux__ */
+ /* Do nothing. Live migration to a remote host with cache.direct=off is
+ * unsupported on other host operating systems. Cache consistency issues
+ * may occur but no error is reported here, partly because that's the
+ * historical behavior and partly because it's hard to differentiate valid
+ * configurations that should not cause errors.
+ */
+#endif /* !__linux__ */
+}
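/*
 * For illustration only (not part of the patch): the same flush +
 * POSIX_FADV_DONTNEED + mincore sequence in a minimal stand-alone form,
 * showing why the flush has to come first -- Linux silently skips dirty
 * pages when asked to drop them. Assumes <fcntl.h>, <stdio.h>, <stdlib.h>,
 * <sys/mman.h>, <sys/stat.h> and <unistd.h>; "path" is a placeholder and
 * error checking is omitted.
 *
 *     int fd = open(path, O_RDONLY);
 *     struct stat st;
 *     fstat(fd, &st);
 *     fsync(fd);                                    // write back dirty pages
 *     posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED); // then drop clean ones
 *
 *     long page = sysconf(_SC_PAGESIZE);
 *     size_t pages = (st.st_size + page - 1) / page;
 *     unsigned char *vec = malloc(pages);           // 1 status byte per page
 *     void *map = mmap(NULL, st.st_size, PROT_NONE, MAP_PRIVATE, fd, 0);
 *     mincore(map, st.st_size, vec);
 *     for (size_t i = 0; i < pages; i++) {
 *         if (vec[i] & 0x1) {
 *             printf("page %zu still resident\n", i);
 *         }
 *     }
 *     munmap(map, st.st_size);
 *     free(vec);
 */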
+
+static coroutine_fn int
+raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
+{
+ BDRVRawState *s = bs->opaque;
+
+ return paio_submit_co(bs, s->fd, offset, NULL, bytes, QEMU_AIO_DISCARD);
}
static int coroutine_fn raw_co_pwrite_zeroes(
int bytes, BdrvRequestFlags flags)
{
BDRVRawState *s = bs->opaque;
+ int operation = QEMU_AIO_WRITE_ZEROES;
- if (!(flags & BDRV_REQ_MAY_UNMAP)) {
- return paio_submit_co(bs, s->fd, offset, NULL, bytes,
- QEMU_AIO_WRITE_ZEROES);
- } else if (s->discard_zeroes) {
- return paio_submit_co(bs, s->fd, offset, NULL, bytes,
- QEMU_AIO_DISCARD);
+ if (flags & BDRV_REQ_MAY_UNMAP) {
+ operation |= QEMU_AIO_DISCARD;
}
- return -ENOTSUP;
+
+ return paio_submit_co(bs, s->fd, offset, NULL, bytes, operation);
}
static int raw_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
}
+static int coroutine_fn raw_co_copy_range_from(
+ BlockDriverState *bs, BdrvChild *src, uint64_t src_offset,
+ BdrvChild *dst, uint64_t dst_offset, uint64_t bytes,
+ BdrvRequestFlags read_flags, BdrvRequestFlags write_flags)
+{
+ return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
+ read_flags, write_flags);
+}
+
+static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs,
+ BdrvChild *src,
+ uint64_t src_offset,
+ BdrvChild *dst,
+ uint64_t dst_offset,
+ uint64_t bytes,
+ BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags)
+{
+ BDRVRawState *s = bs->opaque;
+ BDRVRawState *src_s;
+
+ assert(dst->bs == bs);
+ if (src->bs->drv->bdrv_co_copy_range_to != raw_co_copy_range_to) {
+ return -ENOTSUP;
+ }
+
+ src_s = src->bs->opaque;
+ if (fd_open(src->bs) < 0 || fd_open(dst->bs) < 0) {
+ return -EIO;
+ }
+ return paio_submit_co_full(bs, src_s->fd, src_offset, s->fd, dst_offset,
+ NULL, bytes, QEMU_AIO_COPY_RANGE);
+}
+
BlockDriver bdrv_file = {
.format_name = "file",
.protocol_name = "file",
.bdrv_reopen_commit = raw_reopen_commit,
.bdrv_reopen_abort = raw_reopen_abort,
.bdrv_close = raw_close,
- .bdrv_create = raw_create,
+ .bdrv_co_create = raw_co_create,
+ .bdrv_co_create_opts = raw_co_create_opts,
.bdrv_has_zero_init = bdrv_has_zero_init_1,
- .bdrv_co_get_block_status = raw_co_get_block_status,
+ .bdrv_co_block_status = raw_co_block_status,
+ .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
.bdrv_co_pwrite_zeroes = raw_co_pwrite_zeroes,
.bdrv_co_preadv = raw_co_preadv,
.bdrv_co_pwritev = raw_co_pwritev,
- .bdrv_aio_flush = raw_aio_flush,
- .bdrv_aio_pdiscard = raw_aio_pdiscard,
+ .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
+ .bdrv_co_pdiscard = raw_co_pdiscard,
+ .bdrv_co_copy_range_from = raw_co_copy_range_from,
+ .bdrv_co_copy_range_to = raw_co_copy_range_to,
.bdrv_refresh_limits = raw_refresh_limits,
.bdrv_io_plug = raw_aio_plug,
.bdrv_io_unplug = raw_aio_unplug,
+ .bdrv_attach_aio_context = raw_aio_attach_aio_context,
- .bdrv_truncate = raw_truncate,
+ .bdrv_co_truncate = raw_co_truncate,
.bdrv_getlength = raw_getlength,
.bdrv_get_info = raw_get_info,
.bdrv_get_allocated_file_size
s->type = FTYPE_FILE;
- ret = raw_open_common(bs, options, flags, 0, &local_err);
+ ret = raw_open_common(bs, options, flags, 0, true, &local_err);
if (ret < 0) {
error_propagate(errp, local_err);
#if defined(__APPLE__) && defined(__MACH__)
return -EIO;
}
-static coroutine_fn BlockAIOCB *hdev_aio_pdiscard(BlockDriverState *bs,
- int64_t offset, int bytes,
- BlockCompletionFunc *cb, void *opaque)
+static coroutine_fn int
+hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
BDRVRawState *s = bs->opaque;
+ int ret;
- if (fd_open(bs) < 0) {
- return NULL;
+ ret = fd_open(bs);
+ if (ret < 0) {
+ return ret;
}
- return paio_submit(bs, s->fd, offset, NULL, bytes,
- cb, opaque, QEMU_AIO_DISCARD|QEMU_AIO_BLKDEV);
+ return paio_submit_co(bs, s->fd, offset, NULL, bytes,
+ QEMU_AIO_DISCARD | QEMU_AIO_BLKDEV);
}
static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags)
{
BDRVRawState *s = bs->opaque;
+ int operation = QEMU_AIO_WRITE_ZEROES | QEMU_AIO_BLKDEV;
int rc;
rc = fd_open(bs);
if (rc < 0) {
return rc;
}
- if (!(flags & BDRV_REQ_MAY_UNMAP)) {
- return paio_submit_co(bs, s->fd, offset, NULL, bytes,
- QEMU_AIO_WRITE_ZEROES|QEMU_AIO_BLKDEV);
- } else if (s->discard_zeroes) {
- return paio_submit_co(bs, s->fd, offset, NULL, bytes,
- QEMU_AIO_DISCARD|QEMU_AIO_BLKDEV);
+
+ if (flags & BDRV_REQ_MAY_UNMAP) {
+ operation |= QEMU_AIO_DISCARD;
}
- return -ENOTSUP;
+
+ return paio_submit_co(bs, s->fd, offset, NULL, bytes, operation);
}
-static int hdev_create(const char *filename, QemuOpts *opts,
- Error **errp)
+static int coroutine_fn hdev_co_create_opts(const char *filename, QemuOpts *opts,
+ Error **errp)
{
int fd;
int ret = 0;
.bdrv_reopen_prepare = raw_reopen_prepare,
.bdrv_reopen_commit = raw_reopen_commit,
.bdrv_reopen_abort = raw_reopen_abort,
- .bdrv_create = hdev_create,
+ .bdrv_co_create_opts = hdev_co_create_opts,
.create_opts = &raw_create_opts,
+ .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
.bdrv_co_pwrite_zeroes = hdev_co_pwrite_zeroes,
.bdrv_co_preadv = raw_co_preadv,
.bdrv_co_pwritev = raw_co_pwritev,
- .bdrv_aio_flush = raw_aio_flush,
- .bdrv_aio_pdiscard = hdev_aio_pdiscard,
+ .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
+ .bdrv_co_pdiscard = hdev_co_pdiscard,
+ .bdrv_co_copy_range_from = raw_co_copy_range_from,
+ .bdrv_co_copy_range_to = raw_co_copy_range_to,
.bdrv_refresh_limits = raw_refresh_limits,
.bdrv_io_plug = raw_aio_plug,
.bdrv_io_unplug = raw_aio_unplug,
+ .bdrv_attach_aio_context = raw_aio_attach_aio_context,
- .bdrv_truncate = raw_truncate,
+ .bdrv_co_truncate = raw_co_truncate,
.bdrv_getlength = raw_getlength,
.bdrv_get_info = raw_get_info,
.bdrv_get_allocated_file_size
s->type = FTYPE_CD;
/* open will not fail even if no CD is inserted, so add O_NONBLOCK */
- return raw_open_common(bs, options, flags, O_NONBLOCK, errp);
+ return raw_open_common(bs, options, flags, O_NONBLOCK, true, errp);
}
static int cdrom_probe_device(const char *filename)
.bdrv_reopen_prepare = raw_reopen_prepare,
.bdrv_reopen_commit = raw_reopen_commit,
.bdrv_reopen_abort = raw_reopen_abort,
- .bdrv_create = hdev_create,
+ .bdrv_co_create_opts = hdev_co_create_opts,
.create_opts = &raw_create_opts,
+ .bdrv_co_invalidate_cache = raw_co_invalidate_cache,
.bdrv_co_preadv = raw_co_preadv,
.bdrv_co_pwritev = raw_co_pwritev,
- .bdrv_aio_flush = raw_aio_flush,
+ .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
.bdrv_refresh_limits = raw_refresh_limits,
.bdrv_io_plug = raw_aio_plug,
.bdrv_io_unplug = raw_aio_unplug,
+ .bdrv_attach_aio_context = raw_aio_attach_aio_context,
- .bdrv_truncate = raw_truncate,
+ .bdrv_co_truncate = raw_co_truncate,
.bdrv_getlength = raw_getlength,
.has_variable_length = true,
.bdrv_get_allocated_file_size
s->type = FTYPE_CD;
- ret = raw_open_common(bs, options, flags, 0, &local_err);
+ ret = raw_open_common(bs, options, flags, 0, true, &local_err);
if (ret) {
error_propagate(errp, local_err);
return ret;
.bdrv_reopen_prepare = raw_reopen_prepare,
.bdrv_reopen_commit = raw_reopen_commit,
.bdrv_reopen_abort = raw_reopen_abort,
- .bdrv_create = hdev_create,
+ .bdrv_co_create_opts = hdev_co_create_opts,
.create_opts = &raw_create_opts,
.bdrv_co_preadv = raw_co_preadv,
.bdrv_co_pwritev = raw_co_pwritev,
- .bdrv_aio_flush = raw_aio_flush,
+ .bdrv_co_flush_to_disk = raw_co_flush_to_disk,
.bdrv_refresh_limits = raw_refresh_limits,
.bdrv_io_plug = raw_aio_plug,
.bdrv_io_unplug = raw_aio_unplug,
+ .bdrv_attach_aio_context = raw_aio_attach_aio_context,
- .bdrv_truncate = raw_truncate,
+ .bdrv_co_truncate = raw_co_truncate,
.bdrv_getlength = raw_getlength,
.has_variable_length = true,
.bdrv_get_allocated_file_size