4 * Copyright IBM, Corp. 2010
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
14 #include "hw/virtio.h"
16 #include "qemu_socket.h"
17 #include "hw/virtio-pci.h"
18 #include "virtio-9p.h"
19 #include "fsdev/qemu-fsdev.h"
20 #include "virtio-9p-xattr.h"
21 #include "virtio-9p-coth.h"
26 static int open_fd_rc;
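/*
 * open_fd_rc is, roughly, the per-pass reclaim batch size for open host
 * file descriptors: v9fs_reclaim_fd() below compares its reclaim_count
 * against it, and virtio_9p_set_fd_limit() at the end of this file sets it
 * to half of the RLIMIT_NOFILE soft limit.
 */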
40 static int omode_to_uflags(int8_t mode)
74 static int dotl_to_at_flags(int flags)
77 if (flags & P9_DOTL_AT_REMOVEDIR) {
78 rflags |= AT_REMOVEDIR;
83 struct dotl_openflag_map {
88 static int dotl_to_open_flags(int flags)
92 * We have the same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
93 * and P9_DOTL_NOACCESS
95 int oflags = flags & O_ACCMODE;
97 struct dotl_openflag_map dotl_oflag_map[] = {
98 { P9_DOTL_CREATE, O_CREAT },
99 { P9_DOTL_EXCL, O_EXCL },
100 { P9_DOTL_NOCTTY, O_NOCTTY },
101 { P9_DOTL_TRUNC, O_TRUNC },
102 { P9_DOTL_APPEND, O_APPEND },
103 { P9_DOTL_NONBLOCK, O_NONBLOCK },
104 { P9_DOTL_DSYNC, O_DSYNC },
105 { P9_DOTL_FASYNC, FASYNC },
106 { P9_DOTL_DIRECT, O_DIRECT },
107 { P9_DOTL_LARGEFILE, O_LARGEFILE },
108 { P9_DOTL_DIRECTORY, O_DIRECTORY },
109 { P9_DOTL_NOFOLLOW, O_NOFOLLOW },
110 { P9_DOTL_NOATIME, O_NOATIME },
111 { P9_DOTL_SYNC, O_SYNC },
114 for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
115 if (flags & dotl_oflag_map[i].dotl_flag) {
116 oflags |= dotl_oflag_map[i].open_flag;
123 void cred_init(FsCred *credp)
131 static int get_dotl_openflags(V9fsState *s, int oflags)
135 * Filter the client open flags
137 flags = dotl_to_open_flags(oflags);
138 flags &= ~(O_NOCTTY | O_ASYNC | O_CREAT);
140 * Ignore direct disk access hint until the server supports it.
146 void v9fs_string_init(V9fsString *str)
152 void v9fs_string_free(V9fsString *str)
159 void v9fs_string_null(V9fsString *str)
161 v9fs_string_free(str);
164 static int number_to_string(void *arg, char type)
166 unsigned int ret = 0;
170 unsigned int num = *(unsigned int *)arg;
179 unsigned long num = *(unsigned long *)arg;
187 printf("number_to_string: Unknown number format\n");
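/*
 * The helper below implements a two-pass printf: it first walks the format
 * string to compute the required length (only %s, %u and %lu appear to be
 * handled, numbers being sized via number_to_string() above), then
 * allocates an exact-size buffer and lets vsprintf() fill it in.
 */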
194 static int GCC_FMT_ATTR(2, 0)
195 v9fs_string_alloc_printf(char **strp, const char *fmt, va_list ap)
198 char *iter = (char *)fmt;
202 unsigned int arg_uint;
203 unsigned long arg_ulong;
205 /* Find the number of %'s that denote an argument */
206 for (iter = strstr(iter, "%"); iter; iter = strstr(iter, "%")) {
211 len = strlen(fmt) - 2*nr_args;
221 /* Now parse the format string */
222 for (iter = strstr(iter, "%"); iter; iter = strstr(iter, "%")) {
226 arg_uint = va_arg(ap2, unsigned int);
227 len += number_to_string((void *)&arg_uint, 'u');
230 if (*++iter == 'u') {
231 arg_ulong = va_arg(ap2, unsigned long);
232 len += number_to_string((void *)&arg_ulong, 'U');
238 arg_char_ptr = va_arg(ap2, char *);
239 len += strlen(arg_char_ptr);
246 "v9fs_string_alloc_printf:Incorrect format %c", *iter);
253 *strp = g_malloc((len + 1) * sizeof(**strp));
255 return vsprintf(*strp, fmt, ap);
258 void GCC_FMT_ATTR(2, 3)
259 v9fs_string_sprintf(V9fsString *str, const char *fmt, ...)
264 v9fs_string_free(str);
267 err = v9fs_string_alloc_printf(&str->data, fmt, ap);
274 void v9fs_string_copy(V9fsString *lhs, V9fsString *rhs)
276 v9fs_string_free(lhs);
277 v9fs_string_sprintf(lhs, "%s", rhs->data);
280 void v9fs_path_init(V9fsPath *path)
286 void v9fs_path_free(V9fsPath *path)
293 void v9fs_path_copy(V9fsPath *lhs, V9fsPath *rhs)
296 lhs->data = g_malloc(rhs->size);
297 memcpy(lhs->data, rhs->data, rhs->size);
298 lhs->size = rhs->size;
301 int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
302 const char *name, V9fsPath *path)
305 err = s->ops->name_to_path(&s->ctx, dirpath, name, path);
313 * Return TRUE if s1 is an ancestor of s2.
315 * E.g. "a/b" is an ancestor of "a/b/c" but not of "a/bc/d".
316 * As a special case, we treat s1 as an ancestor of s2 if they are the same.
318 static int v9fs_path_is_ancestor(V9fsPath *s1, V9fsPath *s2)
320 if (!strncmp(s1->data, s2->data, s1->size - 1)) {
321 if (s2->data[s1->size - 1] == '\0' || s2->data[s1->size - 1] == '/') {
328 static size_t v9fs_string_size(V9fsString *str)
334 * Returns 0 if the fid got re-opened, 1 if not, < 0 on error */
335 static int v9fs_reopen_fid(V9fsPDU *pdu, V9fsFidState *f)
338 if (f->fid_type == P9_FID_FILE) {
339 if (f->fs.fd == -1) {
341 err = v9fs_co_open(pdu, f, f->open_flags);
342 } while (err == -EINTR && !pdu->cancelled);
344 } else if (f->fid_type == P9_FID_DIR) {
345 if (f->fs.dir == NULL) {
347 err = v9fs_co_opendir(pdu, f);
348 } while (err == -EINTR && !pdu->cancelled);
354 static V9fsFidState *get_fid(V9fsPDU *pdu, int32_t fid)
358 V9fsState *s = pdu->s;
360 for (f = s->fid_list; f; f = f->next) {
364 * Update the fid ref upfront so that
365 * we don't get reclaimed when we yield
370 * check whether we need to reopen the
371 * file. We might have closed the fd
372 * while trying to free up some file
375 err = v9fs_reopen_fid(pdu, f);
381 * Mark the fid as referenced so that the LRU
382 * reclaim won't close the file descriptor
384 f->flags |= FID_REFERENCED;
391 static V9fsFidState *alloc_fid(V9fsState *s, int32_t fid)
395 for (f = s->fid_list; f; f = f->next) {
396 /* If the fid is already there, return NULL */
402 f = g_malloc0(sizeof(V9fsFidState));
404 f->fid_type = P9_FID_NONE;
407 * Mark the fid as referenced so that the LRU
408 * reclaim won't close the file descriptor
410 f->flags |= FID_REFERENCED;
411 f->next = s->fid_list;
417 static int v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
421 if (fidp->fs.xattr.copied_len == -1) {
422 /* getxattr/listxattr fid */
426 * If this is a fid for setxattr, clunk should
427 * result in a setxattr local call
429 if (fidp->fs.xattr.len != fidp->fs.xattr.copied_len) {
430 /* clunk after partial write */
434 if (fidp->fs.xattr.len) {
435 retval = v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name,
436 fidp->fs.xattr.value,
438 fidp->fs.xattr.flags);
440 retval = v9fs_co_lremovexattr(pdu, &fidp->path, &fidp->fs.xattr.name);
443 v9fs_string_free(&fidp->fs.xattr.name);
445 if (fidp->fs.xattr.value) {
446 g_free(fidp->fs.xattr.value);
451 static int free_fid(V9fsPDU *pdu, V9fsFidState *fidp)
455 if (fidp->fid_type == P9_FID_FILE) {
456 /* If we reclaimed the fd, there is no need to close it */
457 if (fidp->fs.fd != -1) {
458 retval = v9fs_co_close(pdu, fidp->fs.fd);
460 } else if (fidp->fid_type == P9_FID_DIR) {
461 if (fidp->fs.dir != NULL) {
462 retval = v9fs_co_closedir(pdu, fidp->fs.dir);
464 } else if (fidp->fid_type == P9_FID_XATTR) {
465 retval = v9fs_xattr_fid_clunk(pdu, fidp);
467 v9fs_path_free(&fidp->path);
472 static void put_fid(V9fsPDU *pdu, V9fsFidState *fidp)
477 * Don't free the fid if it is in the reclaim list
479 if (!fidp->ref && fidp->clunked) {
484 static V9fsFidState *clunk_fid(V9fsState *s, int32_t fid)
486 V9fsFidState **fidpp, *fidp;
488 for (fidpp = &s->fid_list; *fidpp; fidpp = &(*fidpp)->next) {
489 if ((*fidpp)->fid == fid) {
493 if (*fidpp == NULL) {
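/*
 * The reclaim path below works, in outline, like a clock-style LRU: fids
 * that are in use (f->ref), marked FID_NON_RECLAIMABLE, or recently
 * referenced (FID_REFERENCED) are skipped, everything else has its open
 * fd/dir handle moved onto a local reclaim_list (stopping, it seems, once
 * open_fd_rc entries have been collected), and the handles are then closed
 * in a second loop after the fid-list walk.
 */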
502 void v9fs_reclaim_fd(V9fsPDU *pdu)
504 int reclaim_count = 0;
505 V9fsState *s = pdu->s;
506 V9fsFidState *f, *reclaim_list = NULL;
508 for (f = s->fid_list; f; f = f->next) {
510 * Unlinked fids cannot be reclaimed. Check
511 * for them and skip them. Also skip fids
512 * currently being operated on.
514 if (f->ref || f->flags & FID_NON_RECLAIMABLE) {
518 * If it is a recently referenced fid
519 * we leave the fid untouched and clear the
520 * reference bit. We come back to it later
521 * in the next iteration (a simple LRU without
522 * moving list elements around).
524 if (f->flags & FID_REFERENCED) {
525 f->flags &= ~FID_REFERENCED;
529 * Add the fid to the reclaim list.
531 if (f->fid_type == P9_FID_FILE) {
532 if (f->fs.fd != -1) {
534 * Up the reference count so that
535 * a clunk request won't free this fid
538 f->rclm_lst = reclaim_list;
540 f->fs_reclaim.fd = f->fs.fd;
544 } else if (f->fid_type == P9_FID_DIR) {
545 if (f->fs.dir != NULL) {
547 * Up the reference count so that
548 * a clunk request won't free this fid
551 f->rclm_lst = reclaim_list;
553 f->fs_reclaim.dir = f->fs.dir;
558 if (reclaim_count >= open_fd_rc) {
563 * Now close the fids in the reclaim list and free them
564 * if they are already clunked.
566 while (reclaim_list) {
568 reclaim_list = f->rclm_lst;
569 if (f->fid_type == P9_FID_FILE) {
570 v9fs_co_close(pdu, f->fs_reclaim.fd);
571 } else if (f->fid_type == P9_FID_DIR) {
572 v9fs_co_closedir(pdu, f->fs_reclaim.dir);
576 * Now drop the fid reference, free it
583 static int v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path)
586 V9fsState *s = pdu->s;
587 V9fsFidState *fidp, head_fid;
589 head_fid.next = s->fid_list;
590 for (fidp = s->fid_list; fidp; fidp = fidp->next) {
591 if (fidp->path.size != path->size) {
594 if (!memcmp(fidp->path.data, path->data, path->size)) {
595 /* Mark the fid non reclaimable. */
596 fidp->flags |= FID_NON_RECLAIMABLE;
598 /* reopen the file/dir if already closed */
599 err = v9fs_reopen_fid(pdu, fidp);
604 * Go back to the head of the fid list because
605 * the list could have been updated while we
606 * yielded to the worker thread
616 #define P9_QID_TYPE_DIR 0x80
617 #define P9_QID_TYPE_SYMLINK 0x02
619 #define P9_STAT_MODE_DIR 0x80000000
620 #define P9_STAT_MODE_APPEND 0x40000000
621 #define P9_STAT_MODE_EXCL 0x20000000
622 #define P9_STAT_MODE_MOUNT 0x10000000
623 #define P9_STAT_MODE_AUTH 0x08000000
624 #define P9_STAT_MODE_TMP 0x04000000
625 #define P9_STAT_MODE_SYMLINK 0x02000000
626 #define P9_STAT_MODE_LINK 0x01000000
627 #define P9_STAT_MODE_DEVICE 0x00800000
628 #define P9_STAT_MODE_NAMED_PIPE 0x00200000
629 #define P9_STAT_MODE_SOCKET 0x00100000
630 #define P9_STAT_MODE_SETUID 0x00080000
631 #define P9_STAT_MODE_SETGID 0x00040000
632 #define P9_STAT_MODE_SETVTX 0x00010000
634 #define P9_STAT_MODE_TYPE_BITS (P9_STAT_MODE_DIR | \
635 P9_STAT_MODE_SYMLINK | \
636 P9_STAT_MODE_LINK | \
637 P9_STAT_MODE_DEVICE | \
638 P9_STAT_MODE_NAMED_PIPE | \
641 /* This is the algorithm from ufs in spfs */
642 static void stat_to_qid(const struct stat *stbuf, V9fsQID *qidp)
646 memset(&qidp->path, 0, sizeof(qidp->path));
647 size = MIN(sizeof(stbuf->st_ino), sizeof(qidp->path));
648 memcpy(&qidp->path, &stbuf->st_ino, size);
649 qidp->version = stbuf->st_mtime ^ (stbuf->st_size << 8);
651 if (S_ISDIR(stbuf->st_mode)) {
652 qidp->type |= P9_QID_TYPE_DIR;
654 if (S_ISLNK(stbuf->st_mode)) {
655 qidp->type |= P9_QID_TYPE_SYMLINK;
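/*
 * In short, a qid is derived as: path from st_ino (copied with the MIN-sized
 * memcpy above), version from st_mtime XOR (st_size << 8), and the type bits
 * from the file type; fid_to_qid() below simply lstat()s the fid's path and
 * runs the result through stat_to_qid().
 */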
659 static int fid_to_qid(V9fsPDU *pdu, V9fsFidState *fidp, V9fsQID *qidp)
664 err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
668 stat_to_qid(&stbuf, qidp);
672 static V9fsPDU *alloc_pdu(V9fsState *s)
676 if (!QLIST_EMPTY(&s->free_list)) {
677 pdu = QLIST_FIRST(&s->free_list);
678 QLIST_REMOVE(pdu, next);
679 QLIST_INSERT_HEAD(&s->active_list, pdu, next);
684 static void free_pdu(V9fsState *s, V9fsPDU *pdu)
688 * Cancelled pdus are added back to the free list
691 if (!pdu->cancelled) {
692 QLIST_REMOVE(pdu, next);
693 QLIST_INSERT_HEAD(&s->free_list, pdu, next);
698 size_t pdu_packunpack(void *addr, struct iovec *sg, int sg_count,
699 size_t offset, size_t size, int pack)
704 for (i = 0; size && i < sg_count; i++) {
706 if (offset >= sg[i].iov_len) {
708 offset -= sg[i].iov_len;
711 len = MIN(sg[i].iov_len - offset, size);
713 memcpy(sg[i].iov_base + offset, addr, len);
715 memcpy(addr, sg[i].iov_base + offset, len);
730 static size_t pdu_unpack(void *dst, V9fsPDU *pdu, size_t offset, size_t size)
732 return pdu_packunpack(dst, pdu->elem.out_sg, pdu->elem.out_num,
736 static size_t pdu_pack(V9fsPDU *pdu, size_t offset, const void *src,
739 return pdu_packunpack((void *)src, pdu->elem.in_sg, pdu->elem.in_num,
743 static int pdu_copy_sg(V9fsPDU *pdu, size_t offset, int rx, struct iovec *sg)
747 struct iovec *src_sg;
751 src_sg = pdu->elem.in_sg;
752 num = pdu->elem.in_num;
754 src_sg = pdu->elem.out_sg;
755 num = pdu->elem.out_num;
759 for (i = 0; i < num; i++) {
761 sg[j].iov_base = src_sg[i].iov_base;
762 sg[j].iov_len = src_sg[i].iov_len;
764 } else if (offset < (src_sg[i].iov_len + pos)) {
765 sg[j].iov_base = src_sg[i].iov_base;
766 sg[j].iov_len = src_sg[i].iov_len;
767 sg[j].iov_base += (offset - pos);
768 sg[j].iov_len -= (offset - pos);
771 pos += src_sg[i].iov_len;
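/*
 * Format characters used by pdu_marshal()/pdu_unmarshal() below, as far as
 * they appear in this file: 'b' = 1 byte, 'w' = 16-bit LE, 'd' = 32-bit LE,
 * 'q' = 64-bit LE, 'v' = scatter/gather iovec, 's' = V9fsString (16-bit
 * length followed by the bytes), 'Q' = qid ("bdq"), 'S' = V9fsStat,
 * 'A' = V9fsStatDotl, 'I' = V9fsIattr.
 */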
777 static size_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
779 size_t old_offset = offset;
784 for (i = 0; fmt[i]; i++) {
787 uint8_t *valp = va_arg(ap, uint8_t *);
788 offset += pdu_unpack(valp, pdu, offset, sizeof(*valp));
793 valp = va_arg(ap, uint16_t *);
794 offset += pdu_unpack(&val, pdu, offset, sizeof(val));
795 *valp = le16_to_cpu(val);
800 valp = va_arg(ap, uint32_t *);
801 offset += pdu_unpack(&val, pdu, offset, sizeof(val));
802 *valp = le32_to_cpu(val);
807 valp = va_arg(ap, uint64_t *);
808 offset += pdu_unpack(&val, pdu, offset, sizeof(val));
809 *valp = le64_to_cpu(val);
813 struct iovec *iov = va_arg(ap, struct iovec *);
814 int *iovcnt = va_arg(ap, int *);
815 *iovcnt = pdu_copy_sg(pdu, offset, 0, iov);
819 V9fsString *str = va_arg(ap, V9fsString *);
820 offset += pdu_unmarshal(pdu, offset, "w", &str->size);
821 /* FIXME: sanity check str->size */
822 str->data = g_malloc(str->size + 1);
823 offset += pdu_unpack(str->data, pdu, offset, str->size);
824 str->data[str->size] = 0;
828 V9fsQID *qidp = va_arg(ap, V9fsQID *);
829 offset += pdu_unmarshal(pdu, offset, "bdq",
830 &qidp->type, &qidp->version, &qidp->path);
834 V9fsStat *statp = va_arg(ap, V9fsStat *);
835 offset += pdu_unmarshal(pdu, offset, "wwdQdddqsssssddd",
836 &statp->size, &statp->type, &statp->dev,
837 &statp->qid, &statp->mode, &statp->atime,
838 &statp->mtime, &statp->length,
839 &statp->name, &statp->uid, &statp->gid,
840 &statp->muid, &statp->extension,
841 &statp->n_uid, &statp->n_gid,
846 V9fsIattr *iattr = va_arg(ap, V9fsIattr *);
847 offset += pdu_unmarshal(pdu, offset, "ddddqqqqq",
848 &iattr->valid, &iattr->mode,
849 &iattr->uid, &iattr->gid, &iattr->size,
850 &iattr->atime_sec, &iattr->atime_nsec,
851 &iattr->mtime_sec, &iattr->mtime_nsec);
861 return offset - old_offset;
864 static size_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
866 size_t old_offset = offset;
871 for (i = 0; fmt[i]; i++) {
874 uint8_t val = va_arg(ap, int);
875 offset += pdu_pack(pdu, offset, &val, sizeof(val));
880 cpu_to_le16w(&val, va_arg(ap, int));
881 offset += pdu_pack(pdu, offset, &val, sizeof(val));
886 cpu_to_le32w(&val, va_arg(ap, uint32_t));
887 offset += pdu_pack(pdu, offset, &val, sizeof(val));
892 cpu_to_le64w(&val, va_arg(ap, uint64_t));
893 offset += pdu_pack(pdu, offset, &val, sizeof(val));
897 struct iovec *iov = va_arg(ap, struct iovec *);
898 int *iovcnt = va_arg(ap, int *);
899 *iovcnt = pdu_copy_sg(pdu, offset, 1, iov);
903 V9fsString *str = va_arg(ap, V9fsString *);
904 offset += pdu_marshal(pdu, offset, "w", str->size);
905 offset += pdu_pack(pdu, offset, str->data, str->size);
909 V9fsQID *qidp = va_arg(ap, V9fsQID *);
910 offset += pdu_marshal(pdu, offset, "bdq",
911 qidp->type, qidp->version, qidp->path);
915 V9fsStat *statp = va_arg(ap, V9fsStat *);
916 offset += pdu_marshal(pdu, offset, "wwdQdddqsssssddd",
917 statp->size, statp->type, statp->dev,
918 &statp->qid, statp->mode, statp->atime,
919 statp->mtime, statp->length, &statp->name,
920 &statp->uid, &statp->gid, &statp->muid,
921 &statp->extension, statp->n_uid,
922 statp->n_gid, statp->n_muid);
926 V9fsStatDotl *statp = va_arg(ap, V9fsStatDotl *);
927 offset += pdu_marshal(pdu, offset, "qQdddqqqqqqqqqqqqqqq",
928 statp->st_result_mask,
929 &statp->qid, statp->st_mode,
930 statp->st_uid, statp->st_gid,
931 statp->st_nlink, statp->st_rdev,
932 statp->st_size, statp->st_blksize, statp->st_blocks,
933 statp->st_atime_sec, statp->st_atime_nsec,
934 statp->st_mtime_sec, statp->st_mtime_nsec,
935 statp->st_ctime_sec, statp->st_ctime_nsec,
936 statp->st_btime_sec, statp->st_btime_nsec,
937 statp->st_gen, statp->st_data_version);
946 return offset - old_offset;
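/*
 * On the reply side (complete_pdu() below), a failed request turns into an
 * error reply: for 9P2000.u an error string is marshalled before the numeric
 * errno, while for 9P2000.L only the errno appears to be sent. The 7-byte
 * header written at offset 0 with "dbw" is size[4], type[1], tag[2], and the
 * reply type is the request type + 1.
 */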
949 static void complete_pdu(V9fsState *s, V9fsPDU *pdu, ssize_t len)
951 int8_t id = pdu->id + 1; /* Response */
957 if (s->proto_version != V9FS_PROTO_2000L) {
960 str.data = strerror(err);
961 str.size = strlen(str.data);
963 len += pdu_marshal(pdu, len, "s", &str);
967 len += pdu_marshal(pdu, len, "d", err);
969 if (s->proto_version == V9FS_PROTO_2000L) {
972 trace_v9fs_rerror(pdu->tag, pdu->id, err); /* Trace ERROR */
975 /* fill out the header */
976 pdu_marshal(pdu, 0, "dbw", (int32_t)len, id, pdu->tag);
978 /* keep these in sync */
982 /* push onto queue and notify */
983 virtqueue_push(s->vq, &pdu->elem, len);
985 /* FIXME: we should batch these completions */
986 virtio_notify(&s->vdev, s->vq);
988 /* Now wake up anybody waiting in flush for this request */
989 qemu_co_queue_next(&pdu->complete);
994 static mode_t v9mode_to_mode(uint32_t mode, V9fsString *extension)
999 if (mode & P9_STAT_MODE_DIR) {
1003 if (mode & P9_STAT_MODE_SYMLINK) {
1006 if (mode & P9_STAT_MODE_SOCKET) {
1009 if (mode & P9_STAT_MODE_NAMED_PIPE) {
1012 if (mode & P9_STAT_MODE_DEVICE) {
1013 if (extension && extension->data[0] == 'c') {
1024 if (mode & P9_STAT_MODE_SETUID) {
1027 if (mode & P9_STAT_MODE_SETGID) {
1030 if (mode & P9_STAT_MODE_SETVTX) {
1037 static int donttouch_stat(V9fsStat *stat)
1039 if (stat->type == -1 &&
1041 stat->qid.type == -1 &&
1042 stat->qid.version == -1 &&
1043 stat->qid.path == -1 &&
1045 stat->atime == -1 &&
1046 stat->mtime == -1 &&
1047 stat->length == -1 &&
1052 stat->n_uid == -1 &&
1053 stat->n_gid == -1 &&
1054 stat->n_muid == -1) {
1061 static void v9fs_stat_free(V9fsStat *stat)
1063 v9fs_string_free(&stat->name);
1064 v9fs_string_free(&stat->uid);
1065 v9fs_string_free(&stat->gid);
1066 v9fs_string_free(&stat->muid);
1067 v9fs_string_free(&stat->extension);
1070 static uint32_t stat_to_v9mode(const struct stat *stbuf)
1074 mode = stbuf->st_mode & 0777;
1075 if (S_ISDIR(stbuf->st_mode)) {
1076 mode |= P9_STAT_MODE_DIR;
1079 if (S_ISLNK(stbuf->st_mode)) {
1080 mode |= P9_STAT_MODE_SYMLINK;
1083 if (S_ISSOCK(stbuf->st_mode)) {
1084 mode |= P9_STAT_MODE_SOCKET;
1087 if (S_ISFIFO(stbuf->st_mode)) {
1088 mode |= P9_STAT_MODE_NAMED_PIPE;
1091 if (S_ISBLK(stbuf->st_mode) || S_ISCHR(stbuf->st_mode)) {
1092 mode |= P9_STAT_MODE_DEVICE;
1095 if (stbuf->st_mode & S_ISUID) {
1096 mode |= P9_STAT_MODE_SETUID;
1099 if (stbuf->st_mode & S_ISGID) {
1100 mode |= P9_STAT_MODE_SETGID;
1103 if (stbuf->st_mode & S_ISVTX) {
1104 mode |= P9_STAT_MODE_SETVTX;
1110 static int stat_to_v9stat(V9fsPDU *pdu, V9fsPath *name,
1111 const struct stat *stbuf,
1117 memset(v9stat, 0, sizeof(*v9stat));
1119 stat_to_qid(stbuf, &v9stat->qid);
1120 v9stat->mode = stat_to_v9mode(stbuf);
1121 v9stat->atime = stbuf->st_atime;
1122 v9stat->mtime = stbuf->st_mtime;
1123 v9stat->length = stbuf->st_size;
1125 v9fs_string_null(&v9stat->uid);
1126 v9fs_string_null(&v9stat->gid);
1127 v9fs_string_null(&v9stat->muid);
1129 v9stat->n_uid = stbuf->st_uid;
1130 v9stat->n_gid = stbuf->st_gid;
1133 v9fs_string_null(&v9stat->extension);
1135 if (v9stat->mode & P9_STAT_MODE_SYMLINK) {
1136 err = v9fs_co_readlink(pdu, name, &v9stat->extension);
1140 } else if (v9stat->mode & P9_STAT_MODE_DEVICE) {
1141 v9fs_string_sprintf(&v9stat->extension, "%c %u %u",
1142 S_ISCHR(stbuf->st_mode) ? 'c' : 'b',
1143 major(stbuf->st_rdev), minor(stbuf->st_rdev));
1144 } else if (S_ISDIR(stbuf->st_mode) || S_ISREG(stbuf->st_mode)) {
1145 v9fs_string_sprintf(&v9stat->extension, "%s %lu",
1146 "HARDLINKCOUNT", (unsigned long)stbuf->st_nlink);
1149 str = strrchr(name->data, '/');
1156 v9fs_string_sprintf(&v9stat->name, "%s", str);
1159 v9fs_string_size(&v9stat->name) +
1160 v9fs_string_size(&v9stat->uid) +
1161 v9fs_string_size(&v9stat->gid) +
1162 v9fs_string_size(&v9stat->muid) +
1163 v9fs_string_size(&v9stat->extension);
1167 #define P9_STATS_MODE 0x00000001ULL
1168 #define P9_STATS_NLINK 0x00000002ULL
1169 #define P9_STATS_UID 0x00000004ULL
1170 #define P9_STATS_GID 0x00000008ULL
1171 #define P9_STATS_RDEV 0x00000010ULL
1172 #define P9_STATS_ATIME 0x00000020ULL
1173 #define P9_STATS_MTIME 0x00000040ULL
1174 #define P9_STATS_CTIME 0x00000080ULL
1175 #define P9_STATS_INO 0x00000100ULL
1176 #define P9_STATS_SIZE 0x00000200ULL
1177 #define P9_STATS_BLOCKS 0x00000400ULL
1179 #define P9_STATS_BTIME 0x00000800ULL
1180 #define P9_STATS_GEN 0x00001000ULL
1181 #define P9_STATS_DATA_VERSION 0x00002000ULL
1183 #define P9_STATS_BASIC 0x000007ffULL /* Mask for fields up to BLOCKS */
1184 #define P9_STATS_ALL 0x00003fffULL /* Mask for All fields above */
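/*
 * Of these, only the P9_STATS_BASIC set is currently filled in by
 * stat_to_v9stat_dotl() below; v9fs_getattr() additionally sets
 * P9_STATS_GEN when the client requests it and the underlying filesystem
 * can supply st_gen.
 */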
1187 static void stat_to_v9stat_dotl(V9fsState *s, const struct stat *stbuf,
1188 V9fsStatDotl *v9lstat)
1190 memset(v9lstat, 0, sizeof(*v9lstat));
1192 v9lstat->st_mode = stbuf->st_mode;
1193 v9lstat->st_nlink = stbuf->st_nlink;
1194 v9lstat->st_uid = stbuf->st_uid;
1195 v9lstat->st_gid = stbuf->st_gid;
1196 v9lstat->st_rdev = stbuf->st_rdev;
1197 v9lstat->st_size = stbuf->st_size;
1198 v9lstat->st_blksize = stbuf->st_blksize;
1199 v9lstat->st_blocks = stbuf->st_blocks;
1200 v9lstat->st_atime_sec = stbuf->st_atime;
1201 v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec;
1202 v9lstat->st_mtime_sec = stbuf->st_mtime;
1203 v9lstat->st_mtime_nsec = stbuf->st_mtim.tv_nsec;
1204 v9lstat->st_ctime_sec = stbuf->st_ctime;
1205 v9lstat->st_ctime_nsec = stbuf->st_ctim.tv_nsec;
1206 /* Currently we only support BASIC fields in stat */
1207 v9lstat->st_result_mask = P9_STATS_BASIC;
1209 stat_to_qid(stbuf, &v9lstat->qid);
1212 static struct iovec *adjust_sg(struct iovec *sg, int len, int *iovcnt)
1214 while (len && *iovcnt) {
1215 if (len < sg->iov_len) {
1217 sg->iov_base += len;
1229 static struct iovec *cap_sg(struct iovec *sg, int cap, int *cnt)
1234 for (i = 0; i < *cnt; i++) {
1235 if ((total + sg[i].iov_len) > cap) {
1236 sg[i].iov_len -= ((total + sg[i].iov_len) - cap);
1240 total += sg[i].iov_len;
1248 static void print_sg(struct iovec *sg, int cnt)
1252 printf("sg[%d]: {", cnt);
1253 for (i = 0; i < cnt; i++) {
1257 printf("(%p, %zd)", sg[i].iov_base, sg[i].iov_len);
1262 /* Will call this only for path name based fid */
1263 static void v9fs_fix_path(V9fsPath *dst, V9fsPath *src, int len)
1266 v9fs_path_init(&str);
1267 v9fs_path_copy(&str, dst);
1268 v9fs_string_sprintf((V9fsString *)dst, "%s%s", src->data, str.data+len);
1269 v9fs_path_free(&str);
1270 /* +1 to include terminating NULL */
1274 static void v9fs_version(void *opaque)
1276 V9fsPDU *pdu = opaque;
1277 V9fsState *s = pdu->s;
1281 pdu_unmarshal(pdu, offset, "ds", &s->msize, &version);
1282 trace_v9fs_version(pdu->tag, pdu->id, s->msize, version.data);
1284 if (!strcmp(version.data, "9P2000.u")) {
1285 s->proto_version = V9FS_PROTO_2000U;
1286 } else if (!strcmp(version.data, "9P2000.L")) {
1287 s->proto_version = V9FS_PROTO_2000L;
1289 v9fs_string_sprintf(&version, "unknown");
1292 offset += pdu_marshal(pdu, offset, "ds", s->msize, &version);
1293 trace_v9fs_version_return(pdu->tag, pdu->id, s->msize, version.data);
1295 complete_pdu(s, pdu, offset);
1297 v9fs_string_free(&version);
1301 static void v9fs_attach(void *opaque)
1303 V9fsPDU *pdu = opaque;
1304 V9fsState *s = pdu->s;
1305 int32_t fid, afid, n_uname;
1306 V9fsString uname, aname;
1312 pdu_unmarshal(pdu, offset, "ddssd", &fid, &afid, &uname, &aname, &n_uname);
1313 trace_v9fs_attach(pdu->tag, pdu->id, fid, afid, uname.data, aname.data);
1315 fidp = alloc_fid(s, fid);
1320 fidp->uid = n_uname;
1321 err = v9fs_co_name_to_path(pdu, NULL, "/", &fidp->path);
1327 err = fid_to_qid(pdu, fidp, &qid);
1333 offset += pdu_marshal(pdu, offset, "Q", &qid);
1335 trace_v9fs_attach_return(pdu->tag, pdu->id,
1336 qid.type, qid.version, qid.path);
1340 complete_pdu(s, pdu, err);
1341 v9fs_string_free(&uname);
1342 v9fs_string_free(&aname);
1345 static void v9fs_stat(void *opaque)
1353 V9fsPDU *pdu = opaque;
1354 V9fsState *s = pdu->s;
1356 pdu_unmarshal(pdu, offset, "d", &fid);
1357 trace_v9fs_stat(pdu->tag, pdu->id, fid);
1359 fidp = get_fid(pdu, fid);
1364 err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1368 err = stat_to_v9stat(pdu, &fidp->path, &stbuf, &v9stat);
1372 offset += pdu_marshal(pdu, offset, "wS", 0, &v9stat);
1374 trace_v9fs_stat_return(pdu->tag, pdu->id, v9stat.mode,
1375 v9stat.atime, v9stat.mtime, v9stat.length);
1376 v9fs_stat_free(&v9stat);
1380 complete_pdu(s, pdu, err);
1383 static void v9fs_getattr(void *opaque)
1390 uint64_t request_mask;
1391 V9fsStatDotl v9stat_dotl;
1392 V9fsPDU *pdu = opaque;
1393 V9fsState *s = pdu->s;
1395 pdu_unmarshal(pdu, offset, "dq", &fid, &request_mask);
1396 trace_v9fs_getattr(pdu->tag, pdu->id, fid, request_mask);
1398 fidp = get_fid(pdu, fid);
1404 * Currently we only support BASIC fields in stat, so there is no
1405 * need to look at request_mask.
1407 retval = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1411 stat_to_v9stat_dotl(s, &stbuf, &v9stat_dotl);
1413 /* fill st_gen if requested and supported by underlying fs */
1414 if (request_mask & P9_STATS_GEN) {
1415 retval = v9fs_co_st_gen(pdu, &fidp->path, stbuf.st_mode, &v9stat_dotl);
1419 v9stat_dotl.st_result_mask |= P9_STATS_GEN;
1422 retval += pdu_marshal(pdu, offset, "A", &v9stat_dotl);
1423 trace_v9fs_getattr_return(pdu->tag, pdu->id, v9stat_dotl.st_result_mask,
1424 v9stat_dotl.st_mode, v9stat_dotl.st_uid,
1425 v9stat_dotl.st_gid);
1429 complete_pdu(s, pdu, retval);
1432 /* From Linux kernel code */
1433 #define ATTR_MODE (1 << 0)
1434 #define ATTR_UID (1 << 1)
1435 #define ATTR_GID (1 << 2)
1436 #define ATTR_SIZE (1 << 3)
1437 #define ATTR_ATIME (1 << 4)
1438 #define ATTR_MTIME (1 << 5)
1439 #define ATTR_CTIME (1 << 6)
1440 #define ATTR_MASK 127
1441 #define ATTR_ATIME_SET (1 << 7)
1442 #define ATTR_MTIME_SET (1 << 8)
1444 static void v9fs_setattr(void *opaque)
1451 V9fsPDU *pdu = opaque;
1452 V9fsState *s = pdu->s;
1454 pdu_unmarshal(pdu, offset, "dI", &fid, &v9iattr);
1456 fidp = get_fid(pdu, fid);
1461 if (v9iattr.valid & ATTR_MODE) {
1462 err = v9fs_co_chmod(pdu, &fidp->path, v9iattr.mode);
1467 if (v9iattr.valid & (ATTR_ATIME | ATTR_MTIME)) {
1468 struct timespec times[2];
1469 if (v9iattr.valid & ATTR_ATIME) {
1470 if (v9iattr.valid & ATTR_ATIME_SET) {
1471 times[0].tv_sec = v9iattr.atime_sec;
1472 times[0].tv_nsec = v9iattr.atime_nsec;
1474 times[0].tv_nsec = UTIME_NOW;
1477 times[0].tv_nsec = UTIME_OMIT;
1479 if (v9iattr.valid & ATTR_MTIME) {
1480 if (v9iattr.valid & ATTR_MTIME_SET) {
1481 times[1].tv_sec = v9iattr.mtime_sec;
1482 times[1].tv_nsec = v9iattr.mtime_nsec;
1484 times[1].tv_nsec = UTIME_NOW;
1487 times[1].tv_nsec = UTIME_OMIT;
1489 err = v9fs_co_utimensat(pdu, &fidp->path, times);
1495 * If the only valid entry in iattr is ctime, we can call
1496 * chown(-1, -1) to update the ctime of the file
1498 if ((v9iattr.valid & (ATTR_UID | ATTR_GID)) ||
1499 ((v9iattr.valid & ATTR_CTIME)
1500 && !((v9iattr.valid & ATTR_MASK) & ~ATTR_CTIME))) {
1501 if (!(v9iattr.valid & ATTR_UID)) {
1504 if (!(v9iattr.valid & ATTR_GID)) {
1507 err = v9fs_co_chown(pdu, &fidp->path, v9iattr.uid,
1513 if (v9iattr.valid & (ATTR_SIZE)) {
1514 err = v9fs_co_truncate(pdu, &fidp->path, v9iattr.size);
1523 complete_pdu(s, pdu, err);
1526 static int v9fs_walk_marshal(V9fsPDU *pdu, uint16_t nwnames, V9fsQID *qids)
1530 offset += pdu_marshal(pdu, offset, "w", nwnames);
1531 for (i = 0; i < nwnames; i++) {
1532 offset += pdu_marshal(pdu, offset, "Q", &qids[i]);
1537 static void v9fs_walk(void *opaque)
1540 V9fsQID *qids = NULL;
1542 V9fsPath dpath, path;
1546 int32_t fid, newfid;
1547 V9fsString *wnames = NULL;
1549 V9fsFidState *newfidp = NULL;
1550 V9fsPDU *pdu = opaque;
1551 V9fsState *s = pdu->s;
1553 offset += pdu_unmarshal(pdu, offset, "ddw", &fid,
1556 trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames);
1558 if (nwnames && nwnames <= P9_MAXWELEM) {
1559 wnames = g_malloc0(sizeof(wnames[0]) * nwnames);
1560 qids = g_malloc0(sizeof(qids[0]) * nwnames);
1561 for (i = 0; i < nwnames; i++) {
1562 offset += pdu_unmarshal(pdu, offset, "s", &wnames[i]);
1564 } else if (nwnames > P9_MAXWELEM) {
1568 fidp = get_fid(pdu, fid);
1573 v9fs_path_init(&dpath);
1574 v9fs_path_init(&path);
1576 * Both dpath and path initially point to fidp's path.
1577 * Needed to handle requests with nwnames == 0
1579 v9fs_path_copy(&dpath, &fidp->path);
1580 v9fs_path_copy(&path, &fidp->path);
1581 for (name_idx = 0; name_idx < nwnames; name_idx++) {
1582 err = v9fs_co_name_to_path(pdu, &dpath, wnames[name_idx].data, &path);
1586 err = v9fs_co_lstat(pdu, &path, &stbuf);
1590 stat_to_qid(&stbuf, &qids[name_idx]);
1591 v9fs_path_copy(&dpath, &path);
1593 if (fid == newfid) {
1594 BUG_ON(fidp->fid_type != P9_FID_NONE);
1595 v9fs_path_copy(&fidp->path, &path);
1597 newfidp = alloc_fid(s, newfid);
1598 if (newfidp == NULL) {
1602 newfidp->uid = fidp->uid;
1603 v9fs_path_copy(&newfidp->path, &path);
1605 err = v9fs_walk_marshal(pdu, nwnames, qids);
1606 trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids);
1610 put_fid(pdu, newfidp);
1612 v9fs_path_free(&dpath);
1613 v9fs_path_free(&path);
1615 complete_pdu(s, pdu, err);
1616 if (nwnames && nwnames <= P9_MAXWELEM) {
1617 for (name_idx = 0; name_idx < nwnames; name_idx++) {
1618 v9fs_string_free(&wnames[name_idx]);
1626 static int32_t get_iounit(V9fsPDU *pdu, V9fsPath *path)
1628 struct statfs stbuf;
1630 V9fsState *s = pdu->s;
1633 * iounit should be a multiple of f_bsize (the host filesystem block size)
1634 * as well as less than (client msize - P9_IOHDRSZ)
1636 if (!v9fs_co_statfs(pdu, path, &stbuf)) {
1637 iounit = stbuf.f_bsize;
1638 iounit *= (s->msize - P9_IOHDRSZ)/stbuf.f_bsize;
1641 iounit = s->msize - P9_IOHDRSZ;
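/*
 * Rough example: with msize = 8192 and a host f_bsize of 4096, the statfs
 * branch above yields iounit = 4096 * ((8192 - P9_IOHDRSZ) / 4096) = 4096,
 * i.e. one full host block per request (assuming P9_IOHDRSZ is well under
 * 4096).
 */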
1646 static void v9fs_open(void *opaque)
1657 V9fsPDU *pdu = opaque;
1658 V9fsState *s = pdu->s;
1660 if (s->proto_version == V9FS_PROTO_2000L) {
1661 pdu_unmarshal(pdu, offset, "dd", &fid, &mode);
1663 pdu_unmarshal(pdu, offset, "db", &fid, &mode);
1665 trace_v9fs_open(pdu->tag, pdu->id, fid, mode);
1667 fidp = get_fid(pdu, fid);
1672 BUG_ON(fidp->fid_type != P9_FID_NONE);
1674 err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
1678 stat_to_qid(&stbuf, &qid);
1679 if (S_ISDIR(stbuf.st_mode)) {
1680 err = v9fs_co_opendir(pdu, fidp);
1684 fidp->fid_type = P9_FID_DIR;
1685 offset += pdu_marshal(pdu, offset, "Qd", &qid, 0);
1688 if (s->proto_version == V9FS_PROTO_2000L) {
1689 flags = get_dotl_openflags(s, mode);
1691 flags = omode_to_uflags(mode);
1693 err = v9fs_co_open(pdu, fidp, flags);
1697 fidp->fid_type = P9_FID_FILE;
1698 fidp->open_flags = flags;
1699 if (flags & O_EXCL) {
1701 * We let the host file system do the O_EXCL check;
1702 * we should not reclaim such an fd
1704 fidp->flags |= FID_NON_RECLAIMABLE;
1706 iounit = get_iounit(pdu, &fidp->path);
1707 offset += pdu_marshal(pdu, offset, "Qd", &qid, iounit);
1710 trace_v9fs_open_return(pdu->tag, pdu->id,
1711 qid.type, qid.version, qid.path, iounit);
1715 complete_pdu(s, pdu, err);
1718 static void v9fs_lcreate(void *opaque)
1720 int32_t dfid, flags, mode;
1729 V9fsPDU *pdu = opaque;
1731 pdu_unmarshal(pdu, offset, "dsddd", &dfid, &name, &flags,
1733 trace_v9fs_lcreate(pdu->tag, pdu->id, dfid, flags, mode, gid);
1735 fidp = get_fid(pdu, dfid);
1741 flags = get_dotl_openflags(pdu->s, flags);
1742 err = v9fs_co_open2(pdu, fidp, &name, gid,
1743 flags | O_CREAT, mode, &stbuf);
1747 fidp->fid_type = P9_FID_FILE;
1748 fidp->open_flags = flags;
1749 if (flags & O_EXCL) {
1751 * We let the host file system do the O_EXCL check;
1752 * we should not reclaim such an fd
1754 fidp->flags |= FID_NON_RECLAIMABLE;
1756 iounit = get_iounit(pdu, &fidp->path);
1757 stat_to_qid(&stbuf, &qid);
1758 offset += pdu_marshal(pdu, offset, "Qd", &qid, iounit);
1760 trace_v9fs_lcreate_return(pdu->tag, pdu->id,
1761 qid.type, qid.version, qid.path, iounit);
1765 complete_pdu(pdu->s, pdu, err);
1766 v9fs_string_free(&name);
1769 static void v9fs_fsync(void *opaque)
1776 V9fsPDU *pdu = opaque;
1777 V9fsState *s = pdu->s;
1779 pdu_unmarshal(pdu, offset, "dd", &fid, &datasync);
1780 trace_v9fs_fsync(pdu->tag, pdu->id, fid, datasync);
1782 fidp = get_fid(pdu, fid);
1787 err = v9fs_co_fsync(pdu, fidp, datasync);
1793 complete_pdu(s, pdu, err);
1796 static void v9fs_clunk(void *opaque)
1802 V9fsPDU *pdu = opaque;
1803 V9fsState *s = pdu->s;
1805 pdu_unmarshal(pdu, offset, "d", &fid);
1806 trace_v9fs_clunk(pdu->tag, pdu->id, fid);
1808 fidp = clunk_fid(s, fid);
1814 * Bump the ref so that put_fid will
1822 complete_pdu(s, pdu, err);
1825 static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu,
1826 V9fsFidState *fidp, int64_t off, int32_t max_count)
1832 xattr_len = fidp->fs.xattr.len;
1833 read_count = xattr_len - off;
1834 if (read_count > max_count) {
1835 read_count = max_count;
1836 } else if (read_count < 0) {
1838 * read beyond XATTR value
1842 offset += pdu_marshal(pdu, offset, "d", read_count);
1843 offset += pdu_pack(pdu, offset,
1844 ((char *)fidp->fs.xattr.value) + off,
1849 static int v9fs_do_readdir_with_stat(V9fsPDU *pdu,
1850 V9fsFidState *fidp, int32_t max_count)
1857 off_t saved_dir_pos;
1858 struct dirent *dent, *result;
1860 /* save the directory position */
1861 saved_dir_pos = v9fs_co_telldir(pdu, fidp);
1862 if (saved_dir_pos < 0) {
1863 return saved_dir_pos;
1866 dent = g_malloc(sizeof(struct dirent));
1869 v9fs_path_init(&path);
1870 err = v9fs_co_readdir_r(pdu, fidp, dent, &result);
1871 if (err || !result) {
1874 err = v9fs_co_name_to_path(pdu, &fidp->path, dent->d_name, &path);
1878 err = v9fs_co_lstat(pdu, &path, &stbuf);
1882 err = stat_to_v9stat(pdu, &path, &stbuf, &v9stat);
1886 /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
1887 len = pdu_marshal(pdu, 11 + count, "S", &v9stat);
1888 if ((len != (v9stat.size + 2)) || ((count + len) > max_count)) {
1889 /* Ran out of buffer. Set dir back to old position and return */
1890 v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
1891 v9fs_stat_free(&v9stat);
1892 v9fs_path_free(&path);
1897 v9fs_stat_free(&v9stat);
1898 v9fs_path_free(&path);
1899 saved_dir_pos = dent->d_off;
1903 v9fs_path_free(&path);
1910 static void v9fs_read(void *opaque)
1919 V9fsPDU *pdu = opaque;
1920 V9fsState *s = pdu->s;
1922 pdu_unmarshal(pdu, offset, "dqd", &fid, &off, &max_count);
1923 trace_v9fs_read(pdu->tag, pdu->id, fid, off, max_count);
1925 fidp = get_fid(pdu, fid);
1930 if (fidp->fid_type == P9_FID_DIR) {
1933 v9fs_co_rewinddir(pdu, fidp);
1935 count = v9fs_do_readdir_with_stat(pdu, fidp, max_count);
1941 err += pdu_marshal(pdu, offset, "d", count);
1943 } else if (fidp->fid_type == P9_FID_FILE) {
1947 struct iovec iov[128]; /* FIXME: bad, bad, bad */
1950 pdu_marshal(pdu, offset + 4, "v", sg, &cnt);
1951 sg = cap_sg(sg, max_count, &cnt);
1956 /* Loop in case of EINTR */
1958 len = v9fs_co_preadv(pdu, fidp, sg, cnt, off);
1963 } while (len == -EINTR && !pdu->cancelled);
1965 /* I/O error: return the error */
1969 sg = adjust_sg(sg, len, &cnt);
1970 } while (count < max_count && len > 0);
1972 err += pdu_marshal(pdu, offset, "d", count);
1974 } else if (fidp->fid_type == P9_FID_XATTR) {
1975 err = v9fs_xattr_read(s, pdu, fidp, off, max_count);
1979 trace_v9fs_read_return(pdu->tag, pdu->id, count, err);
1983 complete_pdu(s, pdu, err);
1986 static size_t v9fs_readdir_data_size(V9fsString *name)
1989 * Size of each dirent on the wire: size of qid (13) + size of offset (8) +
1990 * size of type (1) + size of name.size (2) + strlen(name.data)
1992 return 24 + v9fs_string_size(name);
1995 static int v9fs_do_readdir(V9fsPDU *pdu,
1996 V9fsFidState *fidp, int32_t max_count)
2003 off_t saved_dir_pos;
2004 struct dirent *dent, *result;
2006 /* save the directory position */
2007 saved_dir_pos = v9fs_co_telldir(pdu, fidp);
2008 if (saved_dir_pos < 0) {
2009 return saved_dir_pos;
2012 dent = g_malloc(sizeof(struct dirent));
2015 err = v9fs_co_readdir_r(pdu, fidp, dent, &result);
2016 if (err || !result) {
2019 v9fs_string_init(&name);
2020 v9fs_string_sprintf(&name, "%s", dent->d_name);
2021 if ((count + v9fs_readdir_data_size(&name)) > max_count) {
2022 /* Ran out of buffer. Set dir back to old position and return */
2023 v9fs_co_seekdir(pdu, fidp, saved_dir_pos);
2024 v9fs_string_free(&name);
2029 * Fill up just the path field of qid because the client uses
2030 * only that. To fill the entire qid structure we would have
2031 * to stat each dirent found, which is expensive
2033 size = MIN(sizeof(dent->d_ino), sizeof(qid.path));
2034 memcpy(&qid.path, &dent->d_ino, size);
2035 /* Fill the other fields with dummy values */
2039 /* 11 = 7 + 4 (7 = start offset, 4 = space for storing count) */
2040 len = pdu_marshal(pdu, 11 + count, "Qqbs",
2042 dent->d_type, &name);
2044 v9fs_string_free(&name);
2045 saved_dir_pos = dent->d_off;
2054 static void v9fs_readdir(void *opaque)
2060 int64_t initial_offset;
2061 int32_t count, max_count;
2062 V9fsPDU *pdu = opaque;
2063 V9fsState *s = pdu->s;
2065 pdu_unmarshal(pdu, offset, "dqd", &fid, &initial_offset, &max_count);
2067 trace_v9fs_readdir(pdu->tag, pdu->id, fid, initial_offset, max_count);
2069 fidp = get_fid(pdu, fid);
2074 if (!fidp->fs.dir) {
2078 if (initial_offset == 0) {
2079 v9fs_co_rewinddir(pdu, fidp);
2081 v9fs_co_seekdir(pdu, fidp, initial_offset);
2083 count = v9fs_do_readdir(pdu, fidp, max_count);
2089 retval += pdu_marshal(pdu, offset, "d", count);
2091 trace_v9fs_readdir_return(pdu->tag, pdu->id, count, retval);
2095 complete_pdu(s, pdu, retval);
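/*
 * Background for the xattr paths below: a P9_FID_XATTR fid (set up by
 * v9fs_xattrcreate()/v9fs_xattrwalk()) carries an in-memory buffer,
 * fs.xattr.value, of the announced length. v9fs_xattr_write() only copies
 * client data into that buffer and v9fs_xattr_read() copies it back out;
 * the actual v9fs_co_lsetxattr()/v9fs_co_lremovexattr() call is deferred
 * until the fid is clunked, in v9fs_xattr_fid_clunk() above.
 */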
2098 static int v9fs_xattr_write(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
2099 int64_t off, int32_t count,
2100 struct iovec *sg, int cnt)
2109 xattr_len = fidp->fs.xattr.len;
2110 write_count = xattr_len - off;
2111 if (write_count > count) {
2112 write_count = count;
2113 } else if (write_count < 0) {
2115 * write beyond XATTR value len specified in
2121 offset += pdu_marshal(pdu, offset, "d", write_count);
2123 fidp->fs.xattr.copied_len += write_count;
2125 * Now copy the content from the sg list
2127 for (i = 0; i < cnt; i++) {
2128 if (write_count > sg[i].iov_len) {
2129 to_copy = sg[i].iov_len;
2131 to_copy = write_count;
2133 memcpy((char *)fidp->fs.xattr.value + off, sg[i].iov_base, to_copy);
2134 /* updating off since we are not using it below */
2136 write_count -= to_copy;
2142 static void v9fs_write(void *opaque)
2153 struct iovec iov[128]; /* FIXME: bad, bad, bad */
2154 struct iovec *sg = iov;
2155 V9fsPDU *pdu = opaque;
2156 V9fsState *s = pdu->s;
2158 pdu_unmarshal(pdu, offset, "dqdv", &fid, &off, &count, sg, &cnt);
2159 trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, cnt);
2161 fidp = get_fid(pdu, fid);
2166 if (fidp->fid_type == P9_FID_FILE) {
2167 if (fidp->fs.fd == -1) {
2171 } else if (fidp->fid_type == P9_FID_XATTR) {
2173 * setxattr operation
2175 err = v9fs_xattr_write(s, pdu, fidp, off, count, sg, cnt);
2181 sg = cap_sg(sg, count, &cnt);
2186 /* Loop in case of EINTR */
2188 len = v9fs_co_pwritev(pdu, fidp, sg, cnt, off);
2193 } while (len == -EINTR && !pdu->cancelled);
2195 /* I/O error: return the error */
2199 sg = adjust_sg(sg, len, &cnt);
2200 } while (total < count && len > 0);
2201 offset += pdu_marshal(pdu, offset, "d", total);
2203 trace_v9fs_write_return(pdu->tag, pdu->id, total, err);
2207 complete_pdu(s, pdu, err);
2210 static void v9fs_create(void *opaque)
2222 V9fsString extension;
2224 V9fsPDU *pdu = opaque;
2226 v9fs_path_init(&path);
2228 pdu_unmarshal(pdu, offset, "dsdbs", &fid, &name,
2229 &perm, &mode, &extension);
2231 trace_v9fs_create(pdu->tag, pdu->id, fid, name.data, perm, mode);
2233 fidp = get_fid(pdu, fid);
2238 if (perm & P9_STAT_MODE_DIR) {
2239 err = v9fs_co_mkdir(pdu, fidp, &name, perm & 0777,
2240 fidp->uid, -1, &stbuf);
2244 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2248 v9fs_path_copy(&fidp->path, &path);
2249 err = v9fs_co_opendir(pdu, fidp);
2253 fidp->fid_type = P9_FID_DIR;
2254 } else if (perm & P9_STAT_MODE_SYMLINK) {
2255 err = v9fs_co_symlink(pdu, fidp, &name,
2256 extension.data, -1 , &stbuf);
2260 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2264 v9fs_path_copy(&fidp->path, &path);
2265 } else if (perm & P9_STAT_MODE_LINK) {
2266 int32_t ofid = atoi(extension.data);
2267 V9fsFidState *ofidp = get_fid(pdu, ofid);
2268 if (ofidp == NULL) {
2272 err = v9fs_co_link(pdu, ofidp, fidp, &name);
2273 put_fid(pdu, ofidp);
2277 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2279 fidp->fid_type = P9_FID_NONE;
2282 v9fs_path_copy(&fidp->path, &path);
2283 err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
2285 fidp->fid_type = P9_FID_NONE;
2288 } else if (perm & P9_STAT_MODE_DEVICE) {
2290 uint32_t major, minor;
2293 if (sscanf(extension.data, "%c %u %u", &ctype, &major, &minor) != 3) {
2310 nmode |= perm & 0777;
2311 err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2312 makedev(major, minor), nmode, &stbuf);
2316 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2320 v9fs_path_copy(&fidp->path, &path);
2321 } else if (perm & P9_STAT_MODE_NAMED_PIPE) {
2322 err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2323 0, S_IFIFO | (perm & 0777), &stbuf);
2327 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2331 v9fs_path_copy(&fidp->path, &path);
2332 } else if (perm & P9_STAT_MODE_SOCKET) {
2333 err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, -1,
2334 0, S_IFSOCK | (perm & 0777), &stbuf);
2338 err = v9fs_co_name_to_path(pdu, &fidp->path, name.data, &path);
2342 v9fs_path_copy(&fidp->path, &path);
2344 err = v9fs_co_open2(pdu, fidp, &name, -1,
2345 omode_to_uflags(mode)|O_CREAT, perm, &stbuf);
2349 fidp->fid_type = P9_FID_FILE;
2350 fidp->open_flags = omode_to_uflags(mode);
2351 if (fidp->open_flags & O_EXCL) {
2353 * We let the host file system do the O_EXCL check;
2354 * we should not reclaim such an fd
2356 fidp->flags |= FID_NON_RECLAIMABLE;
2359 iounit = get_iounit(pdu, &fidp->path);
2360 stat_to_qid(&stbuf, &qid);
2361 offset += pdu_marshal(pdu, offset, "Qd", &qid, iounit);
2363 trace_v9fs_create_return(pdu->tag, pdu->id,
2364 qid.type, qid.version, qid.path, iounit);
2368 complete_pdu(pdu->s, pdu, err);
2369 v9fs_string_free(&name);
2370 v9fs_string_free(&extension);
2371 v9fs_path_free(&path);
2374 static void v9fs_symlink(void *opaque)
2376 V9fsPDU *pdu = opaque;
2379 V9fsFidState *dfidp;
2387 pdu_unmarshal(pdu, offset, "dssd", &dfid, &name, &symname, &gid);
2388 trace_v9fs_symlink(pdu->tag, pdu->id, dfid, name.data, symname.data, gid);
2390 dfidp = get_fid(pdu, dfid);
2391 if (dfidp == NULL) {
2395 err = v9fs_co_symlink(pdu, dfidp, &name, symname.data, gid, &stbuf);
2399 stat_to_qid(&stbuf, &qid);
2400 offset += pdu_marshal(pdu, offset, "Q", &qid);
2402 trace_v9fs_symlink_return(pdu->tag, pdu->id,
2403 qid.type, qid.version, qid.path);
2405 put_fid(pdu, dfidp);
2407 complete_pdu(pdu->s, pdu, err);
2408 v9fs_string_free(&name);
2409 v9fs_string_free(&symname);
2412 static void v9fs_flush(void *opaque)
2416 V9fsPDU *cancel_pdu;
2417 V9fsPDU *pdu = opaque;
2418 V9fsState *s = pdu->s;
2420 pdu_unmarshal(pdu, offset, "w", &tag);
2421 trace_v9fs_flush(pdu->tag, pdu->id, tag);
2423 QLIST_FOREACH(cancel_pdu, &s->active_list, next) {
2424 if (cancel_pdu->tag == tag) {
2429 cancel_pdu->cancelled = 1;
2431 * Wait for pdu to complete.
2433 qemu_co_queue_wait(&cancel_pdu->complete);
2434 cancel_pdu->cancelled = 0;
2435 free_pdu(pdu->s, cancel_pdu);
2437 complete_pdu(s, pdu, 7);
2441 static void v9fs_link(void *opaque)
2443 V9fsPDU *pdu = opaque;
2444 V9fsState *s = pdu->s;
2445 int32_t dfid, oldfid;
2446 V9fsFidState *dfidp, *oldfidp;
2451 pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name);
2452 trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data);
2454 dfidp = get_fid(pdu, dfid);
2455 if (dfidp == NULL) {
2460 oldfidp = get_fid(pdu, oldfid);
2461 if (oldfidp == NULL) {
2465 err = v9fs_co_link(pdu, oldfidp, dfidp, &name);
2470 put_fid(pdu, dfidp);
2472 v9fs_string_free(&name);
2473 complete_pdu(s, pdu, err);
2476 /* Only works with path name based fid */
2477 static void v9fs_remove(void *opaque)
2483 V9fsPDU *pdu = opaque;
2485 pdu_unmarshal(pdu, offset, "d", &fid);
2486 trace_v9fs_remove(pdu->tag, pdu->id, fid);
2488 fidp = get_fid(pdu, fid);
2493 /* if fs driver is not path based, return EOPNOTSUPP */
2494 if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
2499 * If the file is unlinked, we cannot reopen
2500 * it later, so don't reclaim the fd
2502 err = v9fs_mark_fids_unreclaim(pdu, &fidp->path);
2506 err = v9fs_co_remove(pdu, &fidp->path);
2511 /* For TREMOVE we need to clunk the fid even on failed remove */
2512 clunk_fid(pdu->s, fidp->fid);
2515 complete_pdu(pdu->s, pdu, err);
2518 static void v9fs_unlinkat(void *opaque)
2522 int32_t dfid, flags;
2525 V9fsFidState *dfidp;
2526 V9fsPDU *pdu = opaque;
2528 pdu_unmarshal(pdu, offset, "dsd", &dfid, &name, &flags);
2529 flags = dotl_to_at_flags(flags);
2531 dfidp = get_fid(pdu, dfid);
2532 if (dfidp == NULL) {
2537 * If the file is unlinked, we cannot reopen
2538 * it later, so don't reclaim the fd
2540 v9fs_path_init(&path);
2541 err = v9fs_co_name_to_path(pdu, &dfidp->path, name.data, &path);
2545 err = v9fs_mark_fids_unreclaim(pdu, &path);
2549 err = v9fs_co_unlinkat(pdu, &dfidp->path, &name, flags);
2554 put_fid(pdu, dfidp);
2555 v9fs_path_free(&path);
2557 complete_pdu(pdu->s, pdu, err);
2558 v9fs_string_free(&name);
2562 /* Only works with path name based fid */
2563 static int v9fs_complete_rename(V9fsPDU *pdu, V9fsFidState *fidp,
2564 int32_t newdirfid, V9fsString *name)
2569 V9fsFidState *tfidp;
2570 V9fsState *s = pdu->s;
2571 V9fsFidState *dirfidp = NULL;
2572 char *old_name, *new_name;
2574 v9fs_path_init(&new_path);
2575 if (newdirfid != -1) {
2576 dirfidp = get_fid(pdu, newdirfid);
2577 if (dirfidp == NULL) {
2581 BUG_ON(dirfidp->fid_type != P9_FID_NONE);
2582 v9fs_co_name_to_path(pdu, &dirfidp->path, name->data, &new_path);
2584 old_name = fidp->path.data;
2585 end = strrchr(old_name, '/');
2591 new_name = g_malloc0(end - old_name + name->size + 1);
2592 strncat(new_name, old_name, end - old_name);
2593 strncat(new_name + (end - old_name), name->data, name->size);
2594 v9fs_co_name_to_path(pdu, NULL, new_name, &new_path);
2597 err = v9fs_co_rename(pdu, &fidp->path, &new_path);
2602 * Fix up fids pointing to the old name so that they
2603 * start pointing to the new name
2605 for (tfidp = s->fid_list; tfidp; tfidp = tfidp->next) {
2606 if (v9fs_path_is_ancestor(&fidp->path, &tfidp->path)) {
2607 /* replace the name */
2608 v9fs_fix_path(&tfidp->path, &new_path, strlen(fidp->path.data));
2613 put_fid(pdu, dirfidp);
2615 v9fs_path_free(&new_path);
2620 /* Only works with path name based fid */
2621 static void v9fs_rename(void *opaque)
2629 V9fsPDU *pdu = opaque;
2630 V9fsState *s = pdu->s;
2632 pdu_unmarshal(pdu, offset, "dds", &fid, &newdirfid, &name);
2634 fidp = get_fid(pdu, fid);
2639 BUG_ON(fidp->fid_type != P9_FID_NONE);
2640 /* if fs driver is not path based, return EOPNOTSUPP */
2641 if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) {
2645 v9fs_path_write_lock(s);
2646 err = v9fs_complete_rename(pdu, fidp, newdirfid, &name);
2647 v9fs_path_unlock(s);
2654 complete_pdu(s, pdu, err);
2655 v9fs_string_free(&name);
2658 static void v9fs_fix_fid_paths(V9fsPDU *pdu, V9fsPath *olddir,
2659 V9fsString *old_name, V9fsPath *newdir,
2660 V9fsString *new_name)
2662 V9fsFidState *tfidp;
2663 V9fsPath oldpath, newpath;
2664 V9fsState *s = pdu->s;
2667 v9fs_path_init(&oldpath);
2668 v9fs_path_init(&newpath);
2669 v9fs_co_name_to_path(pdu, olddir, old_name->data, &oldpath);
2670 v9fs_co_name_to_path(pdu, newdir, new_name->data, &newpath);
2673 * Fix up fids pointing to the old name so that they
2674 * start pointing to the new name
2676 for (tfidp = s->fid_list; tfidp; tfidp = tfidp->next) {
2677 if (v9fs_path_is_ancestor(&oldpath, &tfidp->path)) {
2678 /* replace the name */
2679 v9fs_fix_path(&tfidp->path, &newpath, strlen(oldpath.data));
2682 v9fs_path_free(&oldpath);
2683 v9fs_path_free(&newpath);
2686 static int v9fs_complete_renameat(V9fsPDU *pdu, int32_t olddirfid,
2687 V9fsString *old_name, int32_t newdirfid,
2688 V9fsString *new_name)
2691 V9fsState *s = pdu->s;
2692 V9fsFidState *newdirfidp = NULL, *olddirfidp = NULL;
2694 olddirfidp = get_fid(pdu, olddirfid);
2695 if (olddirfidp == NULL) {
2699 if (newdirfid != -1) {
2700 newdirfidp = get_fid(pdu, newdirfid);
2701 if (newdirfidp == NULL) {
2706 newdirfidp = get_fid(pdu, olddirfid);
2709 err = v9fs_co_renameat(pdu, &olddirfidp->path, old_name,
2710 &newdirfidp->path, new_name);
2714 if (s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT) {
2715 /* Only for path-based fids do we need to do the fixup below */
2716 v9fs_fix_fid_paths(pdu, &olddirfidp->path, old_name,
2717 &newdirfidp->path, new_name);
2721 put_fid(pdu, olddirfidp);
2724 put_fid(pdu, newdirfidp);
2729 static void v9fs_renameat(void *opaque)
2733 V9fsPDU *pdu = opaque;
2734 V9fsState *s = pdu->s;
2735 int32_t olddirfid, newdirfid;
2736 V9fsString old_name, new_name;
2738 pdu_unmarshal(pdu, offset, "dsds", &olddirfid,
2739 &old_name, &newdirfid, &new_name);
2741 v9fs_path_write_lock(s);
2742 err = v9fs_complete_renameat(pdu, olddirfid,
2743 &old_name, newdirfid, &new_name);
2744 v9fs_path_unlock(s);
2748 complete_pdu(s, pdu, err);
2749 v9fs_string_free(&old_name);
2750 v9fs_string_free(&new_name);
2753 static void v9fs_wstat(void *opaque)
2762 V9fsPDU *pdu = opaque;
2763 V9fsState *s = pdu->s;
2765 pdu_unmarshal(pdu, offset, "dwS", &fid, &unused, &v9stat);
2766 trace_v9fs_wstat(pdu->tag, pdu->id, fid,
2767 v9stat.mode, v9stat.atime, v9stat.mtime);
2769 fidp = get_fid(pdu, fid);
2774 /* do we need to sync the file? */
2775 if (donttouch_stat(&v9stat)) {
2776 err = v9fs_co_fsync(pdu, fidp, 0);
2779 if (v9stat.mode != -1) {
2781 err = v9fs_co_lstat(pdu, &fidp->path, &stbuf);
2785 v9_mode = stat_to_v9mode(&stbuf);
2786 if ((v9stat.mode & P9_STAT_MODE_TYPE_BITS) !=
2787 (v9_mode & P9_STAT_MODE_TYPE_BITS)) {
2788 /* Attempting to change the type */
2792 err = v9fs_co_chmod(pdu, &fidp->path,
2793 v9mode_to_mode(v9stat.mode,
2794 &v9stat.extension));
2799 if (v9stat.mtime != -1 || v9stat.atime != -1) {
2800 struct timespec times[2];
2801 if (v9stat.atime != -1) {
2802 times[0].tv_sec = v9stat.atime;
2803 times[0].tv_nsec = 0;
2805 times[0].tv_nsec = UTIME_OMIT;
2807 if (v9stat.mtime != -1) {
2808 times[1].tv_sec = v9stat.mtime;
2809 times[1].tv_nsec = 0;
2811 times[1].tv_nsec = UTIME_OMIT;
2813 err = v9fs_co_utimensat(pdu, &fidp->path, times);
2818 if (v9stat.n_gid != -1 || v9stat.n_uid != -1) {
2819 err = v9fs_co_chown(pdu, &fidp->path, v9stat.n_uid, v9stat.n_gid);
2824 if (v9stat.name.size != 0) {
2825 err = v9fs_complete_rename(pdu, fidp, -1, &v9stat.name);
2830 if (v9stat.length != -1) {
2831 err = v9fs_co_truncate(pdu, &fidp->path, v9stat.length);
2840 v9fs_stat_free(&v9stat);
2841 complete_pdu(s, pdu, err);
2844 static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf)
2856 int32_t bsize_factor;
2859 * compute bsize factor based on host file system block size
2862 bsize_factor = (s->msize - P9_IOHDRSZ)/stbuf->f_bsize;
2863 if (!bsize_factor) {
2866 f_type = stbuf->f_type;
2867 f_bsize = stbuf->f_bsize;
2868 f_bsize *= bsize_factor;
2870 * f_bsize is adjusted (multiplied) by the bsize factor, so we need to
2871 * adjust (divide) the number of blocks, free blocks and available
2872 * blocks by the bsize factor
2874 f_blocks = stbuf->f_blocks/bsize_factor;
2875 f_bfree = stbuf->f_bfree/bsize_factor;
2876 f_bavail = stbuf->f_bavail/bsize_factor;
2877 f_files = stbuf->f_files;
2878 f_ffree = stbuf->f_ffree;
2879 fsid_val = (unsigned int) stbuf->f_fsid.__val[0] |
2880 (unsigned long long)stbuf->f_fsid.__val[1] << 32;
2881 f_namelen = stbuf->f_namelen;
2883 return pdu_marshal(pdu, offset, "ddqqqqqqd",
2884 f_type, f_bsize, f_blocks, f_bfree,
2885 f_bavail, f_files, f_ffree,
2886 fsid_val, f_namelen);
2889 static void v9fs_statfs(void *opaque)
2895 struct statfs stbuf;
2896 V9fsPDU *pdu = opaque;
2897 V9fsState *s = pdu->s;
2899 pdu_unmarshal(pdu, offset, "d", &fid);
2900 fidp = get_fid(pdu, fid);
2905 retval = v9fs_co_statfs(pdu, &fidp->path, &stbuf);
2910 retval += v9fs_fill_statfs(s, pdu, &stbuf);
2914 complete_pdu(s, pdu, retval);
2918 static void v9fs_mknod(void *opaque)
2931 V9fsPDU *pdu = opaque;
2932 V9fsState *s = pdu->s;
2934 pdu_unmarshal(pdu, offset, "dsdddd", &fid, &name, &mode,
2935 &major, &minor, &gid);
2936 trace_v9fs_mknod(pdu->tag, pdu->id, fid, mode, major, minor);
2938 fidp = get_fid(pdu, fid);
2943 err = v9fs_co_mknod(pdu, fidp, &name, fidp->uid, gid,
2944 makedev(major, minor), mode, &stbuf);
2948 stat_to_qid(&stbuf, &qid);
2950 err += pdu_marshal(pdu, offset, "Q", &qid);
2951 trace_v9fs_mknod_return(pdu->tag, pdu->id,
2952 qid.type, qid.version, qid.path);
2956 complete_pdu(s, pdu, err);
2957 v9fs_string_free(&name);
2961 * Implement POSIX byte-range locking.
2962 * Server-side handling of the locking code is very simple, because the 9p
2963 * server in QEMU can handle only one client, and most of the lock handling
2964 * (conflict detection, merging, etc.) is done by the client's VFS layer itself,
2965 * so there is nothing to do in the QEMU 9p server's lock code path.
2966 * So when a TLOCK request comes in, always return success
2968 static void v9fs_lock(void *opaque)
2975 int32_t fid, err = 0;
2976 V9fsPDU *pdu = opaque;
2977 V9fsState *s = pdu->s;
2979 flock = g_malloc(sizeof(*flock));
2980 pdu_unmarshal(pdu, offset, "dbdqqds", &fid, &flock->type,
2981 &flock->flags, &flock->start, &flock->length,
2982 &flock->proc_id, &flock->client_id);
2984 trace_v9fs_lock(pdu->tag, pdu->id, fid,
2985 flock->type, flock->start, flock->length);
2987 status = P9_LOCK_ERROR;
2989 /* We support only the block flag for now (and even that is currently ignored) */
2990 if (flock->flags & ~P9_LOCK_FLAGS_BLOCK) {
2994 fidp = get_fid(pdu, fid);
2999 err = v9fs_co_fstat(pdu, fidp->fs.fd, &stbuf);
3003 status = P9_LOCK_SUCCESS;
3008 err += pdu_marshal(pdu, offset, "b", status);
3009 trace_v9fs_lock_return(pdu->tag, pdu->id, status);
3010 complete_pdu(s, pdu, err);
3011 v9fs_string_free(&flock->client_id);
3016 * When a TGETLOCK request comes in, always return success because all lock
3017 * handling is done by the client's VFS layer.
3019 static void v9fs_getlock(void *opaque)
3025 int32_t fid, err = 0;
3026 V9fsPDU *pdu = opaque;
3027 V9fsState *s = pdu->s;
3029 glock = g_malloc(sizeof(*glock));
3030 pdu_unmarshal(pdu, offset, "dbqqds", &fid, &glock->type,
3031 &glock->start, &glock->length, &glock->proc_id,
3034 trace_v9fs_getlock(pdu->tag, pdu->id, fid,
3035 glock->type, glock->start, glock->length);
3037 fidp = get_fid(pdu, fid);
3042 err = v9fs_co_fstat(pdu, fidp->fs.fd, &stbuf);
3046 glock->type = P9_LOCK_TYPE_UNLCK;
3047 offset += pdu_marshal(pdu, offset, "bqqds", glock->type,
3048 glock->start, glock->length, glock->proc_id,
3051 trace_v9fs_getlock_return(pdu->tag, pdu->id, glock->type, glock->start,
3052 glock->length, glock->proc_id);
3056 complete_pdu(s, pdu, err);
3057 v9fs_string_free(&glock->client_id);
3061 static void v9fs_mkdir(void *opaque)
3063 V9fsPDU *pdu = opaque;
3074 pdu_unmarshal(pdu, offset, "dsdd", &fid, &name, &mode, &gid);
3076 trace_v9fs_mkdir(pdu->tag, pdu->id, fid, name.data, mode, gid);
3078 fidp = get_fid(pdu, fid);
3083 err = v9fs_co_mkdir(pdu, fidp, &name, mode, fidp->uid, gid, &stbuf);
3087 stat_to_qid(&stbuf, &qid);
3088 offset += pdu_marshal(pdu, offset, "Q", &qid);
3090 trace_v9fs_mkdir_return(pdu->tag, pdu->id,
3091 qid.type, qid.version, qid.path, err);
3095 complete_pdu(pdu->s, pdu, err);
3096 v9fs_string_free(&name);
3099 static void v9fs_xattrwalk(void *opaque)
3105 int32_t fid, newfid;
3106 V9fsFidState *file_fidp;
3107 V9fsFidState *xattr_fidp = NULL;
3108 V9fsPDU *pdu = opaque;
3109 V9fsState *s = pdu->s;
3111 pdu_unmarshal(pdu, offset, "dds", &fid, &newfid, &name);
3112 trace_v9fs_xattrwalk(pdu->tag, pdu->id, fid, newfid, name.data);
3114 file_fidp = get_fid(pdu, fid);
3115 if (file_fidp == NULL) {
3119 xattr_fidp = alloc_fid(s, newfid);
3120 if (xattr_fidp == NULL) {
3124 v9fs_path_copy(&xattr_fidp->path, &file_fidp->path);
3125 if (name.data[0] == 0) {
3127 * listxattr request. Get the size first
3129 size = v9fs_co_llistxattr(pdu, &xattr_fidp->path, NULL, 0);
3132 clunk_fid(s, xattr_fidp->fid);
3136 * Read the xattr value
3138 xattr_fidp->fs.xattr.len = size;
3139 xattr_fidp->fid_type = P9_FID_XATTR;
3140 xattr_fidp->fs.xattr.copied_len = -1;
3142 xattr_fidp->fs.xattr.value = g_malloc(size);
3143 err = v9fs_co_llistxattr(pdu, &xattr_fidp->path,
3144 xattr_fidp->fs.xattr.value,
3145 xattr_fidp->fs.xattr.len);
3147 clunk_fid(s, xattr_fidp->fid);
3151 offset += pdu_marshal(pdu, offset, "q", size);
3155 * Specific xattr fid. We check for the xattr's
3156 * presence and also collect the xattr size
3158 size = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
3162 clunk_fid(s, xattr_fidp->fid);
3166 * Read the xattr value
3168 xattr_fidp->fs.xattr.len = size;
3169 xattr_fidp->fid_type = P9_FID_XATTR;
3170 xattr_fidp->fs.xattr.copied_len = -1;
3172 xattr_fidp->fs.xattr.value = g_malloc(size);
3173 err = v9fs_co_lgetxattr(pdu, &xattr_fidp->path,
3174 &name, xattr_fidp->fs.xattr.value,
3175 xattr_fidp->fs.xattr.len);
3177 clunk_fid(s, xattr_fidp->fid);
3181 offset += pdu_marshal(pdu, offset, "q", size);
3184 trace_v9fs_xattrwalk_return(pdu->tag, pdu->id, size);
3186 put_fid(pdu, file_fidp);
3188 put_fid(pdu, xattr_fidp);
3191 complete_pdu(s, pdu, err);
3192 v9fs_string_free(&name);
3195 static void v9fs_xattrcreate(void *opaque)
3203 V9fsFidState *file_fidp;
3204 V9fsFidState *xattr_fidp;
3205 V9fsPDU *pdu = opaque;
3206 V9fsState *s = pdu->s;
3208 pdu_unmarshal(pdu, offset, "dsqd",
3209 &fid, &name, &size, &flags);
3210 trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags);
3212 file_fidp = get_fid(pdu, fid);
3213 if (file_fidp == NULL) {
3217 /* Make the file fid point to xattr */
3218 xattr_fidp = file_fidp;
3219 xattr_fidp->fid_type = P9_FID_XATTR;
3220 xattr_fidp->fs.xattr.copied_len = 0;
3221 xattr_fidp->fs.xattr.len = size;
3222 xattr_fidp->fs.xattr.flags = flags;
3223 v9fs_string_init(&xattr_fidp->fs.xattr.name);
3224 v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name);
3226 xattr_fidp->fs.xattr.value = g_malloc(size);
3228 xattr_fidp->fs.xattr.value = NULL;
3231 put_fid(pdu, file_fidp);
3233 complete_pdu(s, pdu, err);
3234 v9fs_string_free(&name);
3237 static void v9fs_readlink(void *opaque)
3239 V9fsPDU *pdu = opaque;
3246 pdu_unmarshal(pdu, offset, "d", &fid);
3247 trace_v9fs_readlink(pdu->tag, pdu->id, fid);
3248 fidp = get_fid(pdu, fid);
3254 v9fs_string_init(&target);
3255 err = v9fs_co_readlink(pdu, &fidp->path, &target);
3259 offset += pdu_marshal(pdu, offset, "s", &target);
3261 trace_v9fs_readlink_return(pdu->tag, pdu->id, target.data);
3262 v9fs_string_free(&target);
3266 complete_pdu(pdu->s, pdu, err);
3269 static CoroutineEntry *pdu_co_handlers[] = {
3270 [P9_TREADDIR] = v9fs_readdir,
3271 [P9_TSTATFS] = v9fs_statfs,
3272 [P9_TGETATTR] = v9fs_getattr,
3273 [P9_TSETATTR] = v9fs_setattr,
3274 [P9_TXATTRWALK] = v9fs_xattrwalk,
3275 [P9_TXATTRCREATE] = v9fs_xattrcreate,
3276 [P9_TMKNOD] = v9fs_mknod,
3277 [P9_TRENAME] = v9fs_rename,
3278 [P9_TLOCK] = v9fs_lock,
3279 [P9_TGETLOCK] = v9fs_getlock,
3280 [P9_TRENAMEAT] = v9fs_renameat,
3281 [P9_TREADLINK] = v9fs_readlink,
3282 [P9_TUNLINKAT] = v9fs_unlinkat,
3283 [P9_TMKDIR] = v9fs_mkdir,
3284 [P9_TVERSION] = v9fs_version,
3285 [P9_TLOPEN] = v9fs_open,
3286 [P9_TATTACH] = v9fs_attach,
3287 [P9_TSTAT] = v9fs_stat,
3288 [P9_TWALK] = v9fs_walk,
3289 [P9_TCLUNK] = v9fs_clunk,
3290 [P9_TFSYNC] = v9fs_fsync,
3291 [P9_TOPEN] = v9fs_open,
3292 [P9_TREAD] = v9fs_read,
3294 [P9_TAUTH] = v9fs_auth,
3296 [P9_TFLUSH] = v9fs_flush,
3297 [P9_TLINK] = v9fs_link,
3298 [P9_TSYMLINK] = v9fs_symlink,
3299 [P9_TCREATE] = v9fs_create,
3300 [P9_TLCREATE] = v9fs_lcreate,
3301 [P9_TWRITE] = v9fs_write,
3302 [P9_TWSTAT] = v9fs_wstat,
3303 [P9_TREMOVE] = v9fs_remove,
3306 static void v9fs_op_not_supp(void *opaque)
3308 V9fsPDU *pdu = opaque;
3309 complete_pdu(pdu->s, pdu, -EOPNOTSUPP);
3312 static void submit_pdu(V9fsState *s, V9fsPDU *pdu)
3315 CoroutineEntry *handler;
3317 if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) ||
3318 (pdu_co_handlers[pdu->id] == NULL)) {
3319 handler = v9fs_op_not_supp;
3321 handler = pdu_co_handlers[pdu->id];
3323 co = qemu_coroutine_create(handler);
3324 qemu_coroutine_enter(co, pdu);
3327 void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
3329 V9fsState *s = (V9fsState *)vdev;
3333 while ((pdu = alloc_pdu(s)) &&
3334 (len = virtqueue_pop(vq, &pdu->elem)) != 0) {
3337 BUG_ON(pdu->elem.out_num == 0 || pdu->elem.in_num == 0);
3338 BUG_ON(pdu->elem.out_sg[0].iov_len < 7);
3340 ptr = pdu->elem.out_sg[0].iov_base;
3342 memcpy(&pdu->size, ptr, 4);
3344 memcpy(&pdu->tag, ptr + 5, 2);
3345 qemu_co_queue_init(&pdu->complete);
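/*
 * The header parsed above is the standard 7-byte 9P framing that the
 * BUG_ON()s check for: size[4] (little-endian total message length) at
 * offset 0, the one-byte message type (pdu->id) at offset 4, and tag[2] at
 * offset 5, which is why pdu->tag is copied from ptr + 5.
 */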
3351 void virtio_9p_set_fd_limit(void)
3354 if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) {
3355 fprintf(stderr, "Failed to get the resource limit\n");
3358 open_fd_hw = rlim.rlim_cur - MIN(400, rlim.rlim_cur/3);
3359 open_fd_rc = rlim.rlim_cur/2;