// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"
#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

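/*
 * Allocate a zeroed pgio header from the read slab cache and stamp
 * it with FMODE_READ; returns NULL if the allocation fails.
 */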
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p =
		kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	/* kfree(NULL) is a no-op, so no NULL check is needed */
	kfree(rhdr->res.scratch);
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

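/*
 * The folio lies entirely beyond EOF: return it zero-filled and
 * uptodate without issuing any I/O.
 */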
static int nfs_return_empty_folio(struct folio *folio)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

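/*
 * Initialize a pageio descriptor for reads. Callers follow the
 * pattern used by nfs_do_read_folio() and nfs_readahead() below:
 *
 *	nfs_pageio_init_read(&pgio, inode, false,
 *			     &nfs_async_read_completion_ops);
 *	nfs_read_add_folio(&pgio, ctx, folio);	-- once per folio
 *	nfs_pageio_complete_read(&pgio);
 *
 * When a pNFS layout driver is active, its read ops are used unless
 * the caller forces I/O through the metadata server (force_mds).
 */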
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

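/*
 * Flush any requests still queued on the descriptor and account
 * the bytes and pages that were read.
 */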
void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

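/*
 * Allocate a scratch buffer for decoding the read reply; it is
 * freed in nfs_readhdr_free() along with the header.
 */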
bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
{
	WARN_ON(hdr->res.scratch != NULL);
	hdr->res.scratch = kmalloc(size, GFP_KERNEL);
	return hdr->res.scratch != NULL;
}
EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);

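/*
 * Release a read request, unlocking the folio once every request
 * in the page group has completed (netfs may keep ownership of the
 * unlock when fscache is in use).
 */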
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);

	nfs_release_request(req);
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		folio_mark_uptodate(nfs_page_to_folio(req));
}

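/*
 * Per-header completion: zero folio ranges that lie beyond the
 * server's EOF, mark fully-read page groups uptodate, record any
 * error in the open context, and release every queued request.
 */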
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct folio *folio = nfs_page_to_folio(req);
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_read_add_folio
			 */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				folio_zero_segment(folio, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				folio_zero_segment(folio, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
	nfs_netfs_read_completion(hdr);
out:
	hdr->release(hdr);
}

static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	rpc_ops->read_setup(hdr, msg);
	nfs_netfs_initiate_read(hdr);
	trace_nfs_initiate_read(hdr);
}

static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);

	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

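/*
 * Build a read request for the folio, rounded up to a multiple of
 * the server's rsize, and queue it on the pageio descriptor. The
 * tail of the folio beyond the valid file length is zeroed here
 * rather than read from the server.
 */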
int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
		       struct nfs_open_context *ctx,
		       struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t fsize = folio_size(folio);
	unsigned int rsize = server->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_folio_length(folio);
	if (len == 0)
		return nfs_return_empty_folio(folio);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
	if (IS_ERR(new)) {
		error = PTR_ERR(new);
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);
		goto out;
	}

	if (len < fsize)
		folio_zero_segment(folio, len, fsize);
	if (!nfs_pageio_add_request(pgio, new)) {
		nfs_list_remove_request(new);
		error = pgio->pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out:
	return error;
}

/*
 * Actually read a folio over the wire.
 */
static int nfs_do_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	int ret;

	ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&ctx->error, 0);
	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = nfs_read_add_folio(&pgio, ctx, folio);
	if (ret)
		goto out_put;

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);
	if (pgio.pg_error < 0) {
		ret = pgio.pg_error;
		goto out_put;
	}

	ret = folio_wait_locked_killable(folio);
	if (!folio_test_uptodate(folio) && !ret)
		ret = xchg(&ctx->error, 0);

out_put:
	put_nfs_open_context(ctx);
	return ret;
}

/*
 * Synchronously read a folio.
 *
 * This is not heavily used as most users try an asynchronous
 * large read through ->readahead first.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	loff_t pos = folio_pos(folio);
	size_t len = folio_size(folio);
	int ret;

	trace_nfs_aop_readpage(inode, pos, len);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	task_io_account_read(len);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the folio lock, there cannot
	 * be any new pending writes generated at this point
	 * for this folio (other folios can be written to).
	 */
	ret = nfs_wb_folio(inode, folio);
	if (ret)
		goto out_unlock;
	if (folio_test_uptodate(folio))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	ret = nfs_netfs_read_folio(file, folio);
	if (ret)
		ret = nfs_do_read_folio(file, folio);
out:
	trace_nfs_aop_readpage_done(inode, pos, len, ret);
	return ret;
out_unlock:
	folio_unlock(folio);
	goto out;
}

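/*
 * ->readahead: queue asynchronous reads for every folio in the
 * readahead window. netfs/fscache gets the first shot at the data;
 * anything it cannot satisfy is read over the wire.
 */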
void nfs_readahead(struct readahead_control *ractl)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	unsigned int nr_pages = readahead_count(ractl);
	struct file *file = ractl->file;
	struct inode *inode = ractl->mapping->host;
	struct folio *folio;
	int ret;

	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
	task_io_account_read(readahead_length(ractl));

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	ret = nfs_netfs_readahead(ractl);
	if (!ret)
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	while ((folio = readahead_folio(ractl)) != NULL) {
		ret = nfs_read_add_folio(&pgio, ctx, folio);
		if (ret)
			break;
	}

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);

	put_nfs_open_context(ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}

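/*
 * Module init/exit helpers for the slab cache backing read pgio
 * headers.
 */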
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};