// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs address (mmap) ops for 9P2000.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/inet.h>
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
#include "fid.h"
/**
 * v9fs_req_issue_op - Issue a read from 9P
 * @subreq: The read to make
 */
static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
{
	struct netfs_read_request *rreq = subreq->rreq;
	struct p9_fid *fid = rreq->netfs_priv;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int total, err;

	/* Point the iterator straight at the inode's pagecache pages so the
	 * 9P reply payload lands there without a bounce buffer.
	 */
	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

	total = p9_client_read(fid, pos, &to, &err);
	netfs_subreq_terminated(subreq, err ?: total, false);
}
/**
 * v9fs_init_rreq - Initialise a read request
 * @rreq: The read request
 * @file: The file being read from
 */
static void v9fs_init_rreq(struct netfs_read_request *rreq, struct file *file)
{
	struct p9_fid *fid = file->private_data;

	/* Take a ref on the fid; it is dropped again in v9fs_req_cleanup() */
	refcount_inc(&fid->count);
	rreq->netfs_priv = fid;
}
/**
 * v9fs_req_cleanup - Cleanup request initialized by v9fs_init_rreq
 * @mapping: unused mapping of request to cleanup
 * @priv: private data to cleanup, a fid, guaranteed non-null
 */
static void v9fs_req_cleanup(struct address_space *mapping, void *priv)
{
	struct p9_fid *fid = priv;

	/* Drop the reference taken in v9fs_init_rreq() */
	p9_client_clunk(fid);
}
/**
 * v9fs_is_cache_enabled - Determine if caching is enabled for an inode
 * @inode: The inode to check
 */
static bool v9fs_is_cache_enabled(struct inode *inode)
{
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));

	return fscache_cookie_enabled(cookie) && !hlist_empty(&cookie->backing_objects);
}
/**
 * v9fs_begin_cache_operation - Begin a cache operation for a read
 * @rreq: The read request
 */
static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
{
	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

	return fscache_begin_read_operation(rreq, cookie);
}
static const struct netfs_read_request_ops v9fs_req_ops = {
	.init_rreq = v9fs_init_rreq,
	.is_cache_enabled = v9fs_is_cache_enabled,
	.begin_cache_operation = v9fs_begin_cache_operation,
	.issue_op = v9fs_req_issue_op,
	.cleanup = v9fs_req_cleanup,
};
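/*
 * The table above is handed to the generic netfs helpers
 * (netfs_readpage(), netfs_readahead() and netfs_write_begin() below):
 * netfs drives the actual 9P I/O through ->issue_op and, when fscache
 * is available for the inode, through ->begin_cache_operation.
 */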
/**
 * v9fs_vfs_readpage - read an entire page in from 9P
 * @file: file being read
 * @page: the page to read into
 */
static int v9fs_vfs_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);

	return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
}
/**
 * v9fs_vfs_readahead - read a set of pages from 9P
 * @ractl: The readahead parameters
 */
static void v9fs_vfs_readahead(struct readahead_control *ractl)
{
	netfs_readahead(ractl, &v9fs_req_ops, NULL);
}
/**
 * v9fs_release_page - release the private state associated with a page
 * @page: The page to be released
 * @gfp: The caller's allocation restrictions
 *
 * Returns 1 if the page can be released, 0 otherwise.
 */
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	struct folio *folio = page_folio(page);

	if (folio_test_private(folio))
		return 0;
#ifdef CONFIG_9P_FSCACHE
	if (folio_test_fscache(folio)) {
		/* Only wait for a pending write to the cache if reclaim is
		 * allowed to both enter the FS and sleep.
		 */
		if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
			return 0;
		folio_wait_fscache(folio);
	}
#endif
	return 1;
}
/**
 * v9fs_invalidate_page - Invalidate a page completely or partially
 * @page: The page to be invalidated
 * @offset: offset of the invalidated region
 * @length: length of the invalidated region
 */
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct folio *folio = page_folio(page);

	folio_wait_fscache(folio);
}
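/*
 * Write a single locked, dirty folio back to the server over 9P.  The
 * write is trimmed to the current i_size so that a racing truncate is
 * not undone.
 */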
static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct v9fs_inode *v9inode = V9FS_I(inode);
	loff_t start = folio_pos(folio);
	loff_t i_size = i_size_read(inode);
	struct iov_iter from;
	size_t len = folio_size(folio);
	int err;

	if (start >= i_size)
		return 0; /* Simultaneous truncation occurred */

	len = min_t(loff_t, i_size - start, len);

	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

	/* We should have writeback_fid always set */
	BUG_ON(!v9inode->writeback_fid);

	folio_start_writeback(folio);

	p9_client_write(v9inode->writeback_fid, start, &from, &err);

	folio_end_writeback(folio);
	return err;
}
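/*
 * Write back a dirty page on behalf of the VM: redirty it for a later
 * retry on -EAGAIN, otherwise record any error on the mapping.
 */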
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int retval;

	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

	retval = v9fs_vfs_write_folio_locked(folio);
	if (retval < 0) {
		if (retval == -EAGAIN) {
			folio_redirty_for_writepage(wbc, folio);
			retval = 0;
		} else {
			mapping_set_error(folio_mapping(folio), retval);
		}
	} else
		retval = 0;

	folio_unlock(folio);
	return retval;
}
/**
 * v9fs_launder_page - Writeback a dirty page
 * @page: The page to be cleaned up
 *
 * Returns 0 on success.
 */
static int v9fs_launder_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int retval;

	if (folio_clear_dirty_for_io(folio)) {
		retval = v9fs_vfs_write_folio_locked(folio);
		if (retval)
			return retval;
	}

	folio_wait_fscache(folio);
	return 0;
}
/**
 * v9fs_direct_IO - 9P address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: The data/buffer to use
 *
 * The presence of v9fs_direct_IO() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode, we shunt off direct read and write requests before
 * the VFS gets them, so this method should never be called.
 *
 * Direct IO is not 'yet' supported in the cached mode. Hence when
 * this routine is called through generic_file_aio_read(), the read/write fails
 * with an error.
 */
static ssize_t
v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t n;
	int err = 0;

	if (iov_iter_rw(iter) == WRITE) {
		n = p9_client_write(file->private_data, pos, iter, &err);
		if (n) {
			struct inode *inode = file_inode(file);
			loff_t i_size = i_size_read(inode);

			if (pos + n > i_size)
				inode_add_bytes(inode, pos + n - i_size);
		}
	} else {
		n = p9_client_read(file->private_data, pos, iter, &err);
	}
	return n ? n : err;
}
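/*
 * Prepare a folio for a buffered write: let netfs prefetch the affected
 * region into the pagecache (and the cache, if enabled), then hand the
 * locked page back to the caller.
 */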
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **subpagep, void **fsdata)
{
	int retval;
	struct folio *folio;
	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	BUG_ON(!v9inode->writeback_fid);

	/* Prefetch area to be written into the cache if we're caching this
	 * file. We need to do this before we get a lock on the page in case
	 * there's more than one writer competing for the same cache block.
	 */
	retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
				   &v9fs_req_ops, NULL);
	if (retval < 0)
		return retval;

	*subpagep = &folio->page;
	return retval;
}
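/*
 * Complete a buffered write: mark the folio up to date and dirty,
 * extend i_size if the write went past the old end of file, then
 * unlock and release the folio.
 */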
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
			  loff_t pos, unsigned int len, unsigned int copied,
			  struct page *subpage, void *fsdata)
{
	loff_t last_pos = pos + copied;
	struct folio *folio = page_folio(subpage);
	struct inode *inode = mapping->host;

	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

	if (!folio_test_uptodate(folio)) {
		if (unlikely(copied < len)) {
			copied = 0;
			goto out;
		}

		folio_mark_uptodate(folio);
	}

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size) {
		inode_add_bytes(inode, last_pos - inode->i_size);
		i_size_write(inode, last_pos);
	}
	folio_mark_dirty(folio);
out:
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}
const struct address_space_operations v9fs_addr_operations = {
	.readpage = v9fs_vfs_readpage,
	.readahead = v9fs_vfs_readahead,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = v9fs_vfs_writepage,
	.write_begin = v9fs_write_begin,
	.write_end = v9fs_write_end,
	.releasepage = v9fs_release_page,
	.invalidatepage = v9fs_invalidate_page,
	.launder_page = v9fs_launder_page,
	.direct_IO = v9fs_direct_IO,
};
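/*
 * Note: this table is installed as inode->i_mapping->a_ops when 9p
 * inodes are set up (see the inode initialisation code elsewhere in
 * fs/9p), which is how the VFS reaches the operations defined here.
 */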