va_list ap;
va_start(ap, fmt);
- ret = virtio_pdu_vmarshal(pdu, offset, fmt, ap);
+ ret = pdu->s->transport->pdu_vmarshal(pdu, offset, fmt, ap);
va_end(ap);
return ret;
va_list ap;
va_start(ap, fmt);
- ret = virtio_pdu_vunmarshal(pdu, offset, fmt, ap);
+ ret = pdu->s->transport->pdu_vunmarshal(pdu, offset, fmt, ap);
va_end(ap);
return ret;
static void pdu_push_and_notify(V9fsPDU *pdu)
{
- virtio_9p_push_and_notify(pdu);
+ pdu->s->transport->push_and_notify(pdu);
}
static int omode_to_uflags(int8_t mode)
{
int retval = 0;
- if (fidp->fs.xattr.copied_len == -1) {
+ if (fidp->fs.xattr.xattrwalk_fid) {
/* getxattr/listxattr fid */
goto free_value;
}
static void coroutine_fn virtfs_reset(V9fsPDU *pdu)
{
V9fsState *s = pdu->s;
- V9fsFidState *fidp = NULL;
+ V9fsFidState *fidp;
/* Free all fids */
while (s->fid_list) {
free_fid(pdu, fidp);
}
}
- if (fidp) {
- /* One or more unclunked fids found... */
- error_report("9pfs:%s: One or more uncluncked fids "
- "found during reset", __func__);
- }
}
#define P9_QID_TYPE_DIR 0x80
size_t offset = 7;
V9fsQID qid;
ssize_t err;
+ Error *local_err = NULL;
v9fs_string_init(&uname);
v9fs_string_init(&aname);
clunk_fid(s, fid);
goto out;
}
- err = pdu_marshal(pdu, offset, "Q", &qid);
- if (err < 0) {
- clunk_fid(s, fid);
- goto out;
- }
- err += offset;
- memcpy(&s->root_qid, &qid, sizeof(qid));
- trace_v9fs_attach_return(pdu->tag, pdu->id,
- qid.type, qid.version, qid.path);
+
/*
* disable migration if we haven't done already.
* attach could get called multiple times for the same export.
*/
if (!s->migration_blocker) {
- s->root_fid = fid;
error_setg(&s->migration_blocker,
"Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'",
s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
- migrate_add_blocker(s->migration_blocker);
+ err = migrate_add_blocker(s->migration_blocker, &local_err);
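+ /* The blocker may be refused; in that case undo it and fail the attach. */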
+ if (local_err) {
+ error_free(local_err);
+ error_free(s->migration_blocker);
+ s->migration_blocker = NULL;
+ clunk_fid(s, fid);
+ goto out;
+ }
+ s->root_fid = fid;
+ }
+
+ err = pdu_marshal(pdu, offset, "Q", &qid);
+ if (err < 0) {
+ clunk_fid(s, fid);
+ goto out;
}
+ err += offset;
+
+ memcpy(&s->root_qid, &qid, sizeof(qid));
+ trace_v9fs_attach_return(pdu->tag, pdu->id,
+ qid.type, qid.version, qid.path);
out:
put_fid(pdu, fidp);
out_nofid:
memcpy(&qids[name_idx], &qid, sizeof(qid));
}
if (fid == newfid) {
- BUG_ON(fidp->fid_type != P9_FID_NONE);
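+ /* A misbehaving client can reach this, so fail the request instead of asserting. */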
+ if (fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
v9fs_path_copy(&fidp->path, &path);
} else {
newfidp = alloc_fid(s, newfid);
err = -ENOENT;
goto out_nofid;
}
- BUG_ON(fidp->fid_type != P9_FID_NONE);
+ if (fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
if (err < 0) {
v9fs_string_free(&name);
}
-static void v9fs_fsync(void *opaque)
+static void coroutine_fn v9fs_fsync(void *opaque)
{
int err;
int32_t fid;
pdu_complete(pdu, err);
}
+/*
+ * Create a QEMUIOVector for a sub-region of PDU iovecs
+ *
+ * @qiov: uninitialized QEMUIOVector
+ * @skip: number of bytes to skip from beginning of PDU
+ * @size: number of bytes to include
+ * @is_write: true - write, false - read
+ *
+ * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
+ * with qemu_iovec_destroy().
+ */
+static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
+ size_t skip, size_t size,
+ bool is_write)
+{
+ QEMUIOVector elem;
+ struct iovec *iov;
+ unsigned int niov;
+
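+ /* Data written by the guest sits in the out-iovecs; reply data is scattered into in-iovecs sized for the skipped header plus the payload. */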
+ if (is_write) {
+ pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov);
+ } else {
+ pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
+ }
+
+ qemu_iovec_init_external(&elem, iov, niov);
+ qemu_iovec_init(qiov, niov);
+ qemu_iovec_concat(qiov, &elem, skip, size);
+}
+
static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
uint64_t off, uint32_t max_count)
{
ssize_t err;
size_t offset = 7;
- int read_count;
- int64_t xattr_len;
- V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
- VirtQueueElement *elem = v->elems[pdu->idx];
+ uint64_t read_count;
+ QEMUIOVector qiov_full;
- xattr_len = fidp->fs.xattr.len;
- read_count = xattr_len - off;
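+ /* read_count is unsigned now, so clamp explicitly; reading past the end of the value yields zero bytes. */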
+ if (fidp->fs.xattr.len < off) {
+ read_count = 0;
+ } else {
+ read_count = fidp->fs.xattr.len - off;
+ }
if (read_count > max_count) {
read_count = max_count;
- } else if (read_count < 0) {
- /*
- * read beyond XATTR value
- */
- read_count = 0;
}
err = pdu_marshal(pdu, offset, "d", read_count);
if (err < 0) {
}
offset += err;
- err = v9fs_pack(elem->in_sg, elem->in_num, offset,
+ v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
+ err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
((char *)fidp->fs.xattr.value) + off,
read_count);
+ qemu_iovec_destroy(&qiov_full);
if (err < 0) {
return err;
}
return count;
}
-/*
- * Create a QEMUIOVector for a sub-region of PDU iovecs
- *
- * @qiov: uninitialized QEMUIOVector
- * @skip: number of bytes to skip from beginning of PDU
- * @size: number of bytes to include
- * @is_write: true - write, false - read
- *
- * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
- * with qemu_iovec_destroy().
- */
-static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
- size_t skip, size_t size,
- bool is_write)
-{
- QEMUIOVector elem;
- struct iovec *iov;
- unsigned int niov;
-
- virtio_init_iov_from_pdu(pdu, &iov, &niov, is_write);
-
- qemu_iovec_init_external(&elem, iov, niov);
- qemu_iovec_init(qiov, niov);
- qemu_iovec_concat(qiov, &elem, skip, size);
-}
-
static void coroutine_fn v9fs_read(void *opaque)
{
int32_t fid;
{
int i, to_copy;
ssize_t err = 0;
- int write_count;
- int64_t xattr_len;
+ uint64_t write_count;
size_t offset = 7;
- xattr_len = fidp->fs.xattr.len;
- write_count = xattr_len - off;
- if (write_count > count) {
- write_count = count;
- } else if (write_count < 0) {
- /*
- * write beyond XATTR value len specified in
- * xattrcreate
- */
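+ /* Writing beyond the length fixed at xattrcreate time cannot succeed. */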
+ if (fidp->fs.xattr.len < off) {
err = -ENOSPC;
goto out;
}
+ write_count = fidp->fs.xattr.len - off;
+ if (write_count > count) {
+ write_count = count;
+ }
err = pdu_marshal(pdu, offset, "d", write_count);
if (err < 0) {
return err;
v9fs_string_free(&symname);
}
-static void v9fs_flush(void *opaque)
+static void coroutine_fn v9fs_flush(void *opaque)
{
ssize_t err;
int16_t tag;
/*
* Wait for pdu to complete.
*/
- qemu_co_queue_wait(&cancel_pdu->complete);
+ qemu_co_queue_wait(&cancel_pdu->complete, NULL);
cancel_pdu->cancelled = 0;
pdu_free(cancel_pdu);
}
err = -ENOENT;
goto out_nofid;
}
- BUG_ON(dirfidp->fid_type != P9_FID_NONE);
+ if (dirfidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path);
} else {
old_name = fidp->path.data;
err = -ENOENT;
goto out_nofid;
}
- BUG_ON(fidp->fid_type != P9_FID_NONE);
+ if (fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out;
+ }
/* if fs driver is not path based, return EOPNOTSUPP */
if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
err = -EOPNOTSUPP;
*/
xattr_fidp->fs.xattr.len = size;
xattr_fidp->fid_type = P9_FID_XATTR;
- xattr_fidp->fs.xattr.copied_len = -1;
+ xattr_fidp->fs.xattr.xattrwalk_fid = true;
if (size) {
xattr_fidp->fs.xattr.value = g_malloc(size);
err = v9fs_co_llistxattr(pdu, &xattr_fidp->path,
*/
xattr_fidp->fs.xattr.len = size;
xattr_fidp->fid_type = P9_FID_XATTR;
- xattr_fidp->fs.xattr.copied_len = -1;
+ xattr_fidp->fs.xattr.xattrwalk_fid = true;
if (size) {
xattr_fidp->fs.xattr.value = g_malloc(size);
err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
{
int flags;
int32_t fid;
- int64_t size;
+ uint64_t size;
ssize_t err = 0;
V9fsString name;
size_t offset = 7;
}
trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags);
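+ /* Reject oversized requests before the value buffer is allocated below. */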
+ if (size > XATTR_SIZE_MAX) {
+ err = -E2BIG;
+ goto out_nofid;
+ }
+
file_fidp = get_fid(pdu, fid);
if (file_fidp == NULL) {
err = -EINVAL;
goto out_nofid;
}
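+ /* Only a fid that has not been opened yet may become an xattr fid. */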
+ if (file_fidp->fid_type != P9_FID_NONE) {
+ err = -EINVAL;
+ goto out_put_fid;
+ }
+
/* Make the file fid point to xattr */
xattr_fidp = file_fidp;
xattr_fidp->fid_type = P9_FID_XATTR;
xattr_fidp->fs.xattr.copied_len = 0;
+ xattr_fidp->fs.xattr.xattrwalk_fid = false;
xattr_fidp->fs.xattr.len = size;
xattr_fidp->fs.xattr.flags = flags;
v9fs_string_init(&xattr_fidp->fs.xattr.name);
v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name);
- g_free(xattr_fidp->fs.xattr.value);
xattr_fidp->fs.xattr.value = g_malloc0(size);
err = offset;
+out_put_fid:
put_fid(pdu, file_fidp);
out_nofid:
pdu_complete(pdu, err);
/* Returns 0 on success, 1 on failure. */
int v9fs_device_realize_common(V9fsState *s, Error **errp)
{
- V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
int i, len;
struct stat stat;
FsDriverEntry *fse;
/* initialize pdu allocator */
QLIST_INIT(&s->free_list);
QLIST_INIT(&s->active_list);
- for (i = 0; i < (MAX_REQ - 1); i++) {
- QLIST_INSERT_HEAD(&s->free_list, &v->pdus[i], next);
- v->pdus[i].s = s;
- v->pdus[i].idx = i;
+ for (i = 0; i < MAX_REQ; i++) {
+ QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
+ s->pdus[i].s = s;
+ s->pdus[i].idx = i;
}
v9fs_path_init(&path);
rc = 0;
out:
if (rc) {
- g_free(s->ctx.fs_root);
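+ /* Give the backend a chance to undo its init if it allocated private state. */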
+ if (s->ops && s->ops->cleanup && s->ctx.private) {
+ s->ops->cleanup(&s->ctx);
+ }
g_free(s->tag);
+ g_free(s->ctx.fs_root);
v9fs_path_free(&path);
}
return rc;
void v9fs_device_unrealize_common(V9fsState *s, Error **errp)
{
- g_free(s->ctx.fs_root);
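+ /* Let the fsdev backend release its resources before freeing ours. */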
+ if (s->ops->cleanup) {
+ s->ops->cleanup(&s->ctx);
+ }
g_free(s->tag);
+ g_free(s->ctx.fs_root);
}
typedef struct VirtfsCoResetData {