// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <trace/events/netfs.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"
30 * Writeback calls this when it finds a folio that needs uploading. This isn't
31 * called if writeback only has copy-to-cache to deal with.
33 static void v9fs_begin_writeback(struct netfs_io_request *wreq)
37 fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
39 WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
44 wreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
46 wreq->wsize = min(wreq->wsize, fid->iounit);
47 wreq->netfs_priv = fid;
48 wreq->io_streams[0].avail = true;
52 * Issue a subrequest to write to the server.
54 static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
56 struct p9_fid *fid = subreq->rreq->netfs_priv;
59 len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
61 __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
62 netfs_write_subrequest_terminated(subreq, len ?: err, false);
66 * v9fs_issue_read - Issue a read from 9P
67 * @subreq: The read to make
69 static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
71 struct netfs_io_request *rreq = subreq->rreq;
72 struct p9_fid *fid = rreq->netfs_priv;
73 unsigned long long pos = subreq->start + subreq->transferred;
76 total = p9_client_read(fid, pos, &subreq->io_iter, &err);
78 /* if we just extended the file size, any portion not in
79 * cache won't be on server and is zeroes */
80 if (subreq->rreq->origin != NETFS_DIO_READ)
81 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
82 if (pos + total >= i_size_read(rreq->inode))
83 __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
86 subreq->transferred += total;
87 __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
90 netfs_read_subreq_terminated(subreq, err, false);
94 * v9fs_init_request - Initialise a request
95 * @rreq: The read request
96 * @file: The file being read from
98 static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
101 bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
102 rreq->origin == NETFS_WRITETHROUGH ||
103 rreq->origin == NETFS_UNBUFFERED_WRITE ||
104 rreq->origin == NETFS_DIO_WRITE);
106 if (rreq->origin == NETFS_WRITEBACK)
107 return 0; /* We don't get the write handle until we find we
108 * have actually dirty data and not just
109 * copy-to-cache data.
113 fid = file->private_data;
118 fid = v9fs_fid_find_inode(rreq->inode, writing, INVALID_UID, true);
123 rreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
125 rreq->wsize = min(rreq->wsize, fid->iounit);
127 /* we might need to read from a fid that was opened write-only
128 * for read-modify-write of page cache, use the writeback fid
130 WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE && !(fid->mode & P9_ORDWR));
131 rreq->netfs_priv = fid;
135 WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
141 * v9fs_free_request - Cleanup request initialized by v9fs_init_rreq
142 * @rreq: The I/O request to clean up
144 static void v9fs_free_request(struct netfs_io_request *rreq)
146 struct p9_fid *fid = rreq->netfs_priv;
151 const struct netfs_request_ops v9fs_req_ops = {
152 .init_request = v9fs_init_request,
153 .free_request = v9fs_free_request,
154 .issue_read = v9fs_issue_read,
155 .begin_writeback = v9fs_begin_writeback,
156 .issue_write = v9fs_issue_write,
159 const struct address_space_operations v9fs_addr_operations = {
160 .read_folio = netfs_read_folio,
161 .readahead = netfs_readahead,
162 .dirty_folio = netfs_dirty_folio,
163 .release_folio = netfs_release_folio,
164 .invalidate_folio = netfs_invalidate_folio,
165 .direct_IO = noop_direct_IO,
166 .writepages = netfs_writepages,