// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

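/*
 * Read headers are carved from a dedicated slab cache (created in
 * nfs_init_readpagecache() below). The allocator zeroes the header and
 * marks it for read I/O; it may return NULL under memory pressure.
 */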
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

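/*
 * A request entirely beyond EOF needs no RPC at all: zero-fill the page,
 * mark it uptodate and unlock it.
 */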
static
int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

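/*
 * Initialize a pageio descriptor for reads. The typical call sequence is
 * the one used by nfs_readpage_async() below:
 *
 *	nfs_pageio_init_read(&pgio, inode, false, &nfs_async_read_completion_ops);
 *	nfs_pageio_add_request(&pgio, req);
 *	nfs_pageio_complete(&pgio);
 *
 * When a pNFS layout driver is active (and I/O through the MDS is not
 * being forced), its pg_read_ops drive the descriptor; otherwise reads go
 * through the generic RPC read path.
 */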
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			      struct inode *inode, bool force_mds,
			      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

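/*
 * Fall back from a pNFS layout driver to plain I/O through the MDS: let
 * the old pg_ops clean up, then re-point the descriptor at the generic
 * RPC ops and the server's rsize.
 */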
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

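/*
 * Drop a request once its read has finished. The last request in a page
 * group pushes an uptodate page into fscache and unlocks it.
 */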
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(req->wb_page))
			nfs_readpage_to_fscache(inode, req->wb_page, 0);

		unlock_page(req->wb_page);
	}
	nfs_release_request(req);
}

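/*
 * Kick off an asynchronous read of a single page. Bytes past the valid
 * length are zeroed up front, so completion only has to worry about the
 * range actually sent to the server.
 */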
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	if (!nfs_pageio_add_request(&pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
	}
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;

	return pgio.pg_error < 0 ? pgio.pg_error : 0;
}

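/*
 * Only mark the page uptodate once every request in its page group has
 * seen valid data.
 */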
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}

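/*
 * Per-header completion: walk the finished requests, zero any tail the
 * server did not return (short read at EOF), flag the good ranges
 * uptodate and release each request.
 */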
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}

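/*
 * Set up the RPC message for a READ. Reads of a swapfile carry
 * NFS_RPC_SWAPFLAGS so the RPC layer treats them as swap I/O.
 */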
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = hdr->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	rpc_ops->read_setup(hdr, msg);
	trace_nfs_initiate_read(inode, hdr->io_start, hdr->good_bytes);
}

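/*
 * Error cleanup for the completion ops below: release every request
 * still queued on the list.
 */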
static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(inode, task->tk_status,
				hdr->args.offset, hdr->res.eof);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

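/*
 * Handle a short read: give up if the server returned nothing, punt
 * non-RPC layout drivers back to the MDS, otherwise restart the RPC for
 * the remaining byte range.
 */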
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}

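/*
 * Sort out the read result: trim good_bytes when the server reported EOF
 * inside the requested range, or retry when we got a short read without
 * EOF.
 */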
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int		error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_SIZE, page_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);

out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}

struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};

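/*
 * Filler for read_cache_pages(): turn one pagecache page into an NFS
 * read request and feed it to the pageio descriptor in the nfs_readdesc.
 */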
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		nfs_list_remove_request(new);
		nfs_readpage_release(new);
		error = desc->pgio->pg_error;
		goto out;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
	unlock_page(page);
out:
	return error;
}

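/*
 * The ->readpages() address_space op: try fscache first, then batch the
 * remaining pages through one pageio descriptor so contiguous requests
 * can be coalesced into larger on-the-wire READs.
 */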
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_pgio_mirror *pgm;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
			inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	nfs_pageio_complete(&pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio.pg_mirror_count != 1);

	pgm = &pgio.pg_mirrors[0];
	NFS_I(inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >>
		 PAGE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}

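/*
 * Module init/teardown for the read path: create and destroy the slab
 * cache backing nfs_readhdr_alloc().
 */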
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};