/*
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 */
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;
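
/* Allocate and free the combined read header from the dedicated slab cache. */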
static struct nfs_rw_header *nfs_readhdr_alloc(void)
{
	return kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
}
static void nfs_readhdr_free(struct nfs_rw_header *rhdr)
{
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}
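
/*
 * Shortcut for reads beyond end-of-file: hand back a zero-filled,
 * up-to-date page without issuing any RPC.
 */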
static int nfs_return_empty_page(struct page *page)
{
	zero_user(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
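
/*
 * Initialise a pageio descriptor for reads. When a pNFS layout driver is
 * active (and MDS I/O is not being forced), its pg_read_ops are used;
 * otherwise reads go through the generic rw ops to the MDS.
 */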
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			  struct inode *inode, bool force_mds,
			  const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
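
/* Fall back to plain reads through the MDS, using the server's rsize. */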
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_ops = &nfs_pgio_rw_ops;
	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
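
/*
 * Build a single read request for @page and send it through a private
 * pageio descriptor. The tail of a partial page is zeroed here.
 */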
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		       struct page *page)
{
	struct nfs_page	*new;
	unsigned int len;
	struct nfs_pageio_descriptor pgio;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, page, NULL, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);
	nfs_pageio_add_request(&pgio, new);
	nfs_pageio_complete(&pgio);
	NFS_I(inode)->read_io += pgio.pg_bytes_written;
	return 0;
}
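
/*
 * Release one subrequest. Once every request in the page group has
 * completed, push the page to fscache (if it is uptodate) and unlock it.
 */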
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->dentry->d_inode;

	dprintk("NFS: read done (%s/%llu %d@%lld)\n", d_inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(d_inode), req->wb_bytes,
		(long long)req_offset(req));

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
		if (PageUptodate(req->wb_page))
			nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

		unlock_page(req->wb_page);
	}

	dprintk("NFS: read done (%s/%Lu %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_release_request(req);
}
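
/* Set PG_uptodate on the page only once every subrequest in the group is uptodate. */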
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		SetPageUptodate(req->wb_page);
}
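
/*
 * Per-header completion: walk the request list, zero any ranges the server
 * did not return data for (short read at EOF), and mark pages uptodate only
 * when all of their good bytes have arrived.
 */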
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_readpage_async /
			 * readpage_async_filler */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				zero_user_segment(page, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				zero_user_segment(page, start, end);
			}
		}
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
out:
	hdr->release(hdr);
}
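
/* Fill in the RPC message for a READ call; swapfile I/O gets special RPC flags. */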
static void nfs_initiate_read(struct nfs_pgio_data *data, struct rpc_message *msg,
			      struct rpc_task_setup *task_setup_data, int how)
{
	struct inode *inode = data->header->inode;
	int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

	task_setup_data->flags |= swap_flags;
	NFS_PROTO(inode)->read_setup(data, msg);
}
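
/* Error cleanup path: drop every queued request and unlock its page. */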
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task, struct nfs_pgio_data *data,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);

	if (task->tk_status == -ESTALE) {
		set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}
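
/*
 * The server returned fewer bytes than requested. If it made no progress
 * at all, flag an error; otherwise advance the arguments past the bytes
 * already received and restart the RPC call.
 */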
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_pgio_data *data)
{
	struct nfs_pgio_args *argp = &data->args;
	struct nfs_pgio_res  *resp = &data->res;

	/* This is a short read! */
	nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(data->header, -EIO, argp->offset);
		return;
	}
	/* Yes, so retry the read at the end of the data */
	data->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	rpc_restart_call_prepare(task);
}
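
/*
 * Per-RPC result handling: trim good_bytes when the server reports EOF
 * before the end of the request, or schedule a retry for a short read.
 */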
static void nfs_readpage_result(struct rpc_task *task, struct nfs_pgio_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (data->res.eof) {
		loff_t bound;

		bound = data->args.offset + data->res.count;
		spin_lock(&hdr->lock);
		if (bound < hdr->io_start + hdr->good_bytes) {
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
			hdr->good_bytes = bound - hdr->io_start;
		}
		spin_unlock(&hdr->lock);
	} else if (data->res.count != data->args.count)
		nfs_readpage_retry(task, data);
}
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page_file_mapping(page)->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page_file_index(page));
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_unlock;
	if (PageUptodate(page))
		goto out_unlock;

	error = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	if (file == NULL) {
		error = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out_unlock;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;
	}

	error = nfs_readpage_async(ctx, inode, page);
out:
	put_nfs_open_context(ctx);
	return error;
out_unlock:
	unlock_page(page);
	return error;
}
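
/* Context passed to readpage_async_filler() by nfs_readpages(). */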
struct nfs_readdesc {
	struct nfs_pageio_descriptor *pgio;
	struct nfs_open_context *ctx;
};
static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct nfs_page *new;
	unsigned int len;
	int error;

	len = nfs_page_length(page);
	if (len == 0)
		return nfs_return_empty_page(page);

	new = nfs_create_request(desc->ctx, page, NULL, 0, len);
	if (IS_ERR(new))
		goto out_error;

	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);
	if (!nfs_pageio_add_request(desc->pgio, new)) {
		error = desc->pgio->pg_error;
		goto out_unlock;
	}
	return 0;
out_error:
	error = PTR_ERR(new);
out_unlock:
	unlock_page(page);
	return error;
}
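
/*
 * ->readpages() entry point: try fscache first, then feed the remaining
 * pages through a pageio descriptor via read_cache_pages().
 */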
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_readdesc desc = {
		.pgio = &pgio,
	};
	struct inode *inode = mapping->host;
	unsigned long npages;
	int ret = -ESTALE;

	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
			inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (NFS_STALE(inode))
		goto out;

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0)
		goto read_complete; /* all pages were read */

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

	nfs_pageio_complete(&pgio);
	NFS_I(inode)->read_io += pgio.pg_bytes_written;
	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
	put_nfs_open_context(desc.ctx);
out:
	return ret;
}
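
/* Module init/exit helpers for the read header slab cache. */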
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_rw_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}
static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_mode		= FMODE_READ,
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};