 * linux/fs/nfs/direct.c
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data. Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols. Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache. A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache. The client does not
 * correct unaligned requests from applications. All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
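 *
 * For illustration only (the path, size, and alignment below are made up,
 * and this sketch is not part of the client itself), an application
 * typically requests uncached I/O by opening the file with O_DIRECT and
 * issuing ordinary reads and writes:
 *
 *      fd = open("/mnt/nfs/bigfile", O_RDWR | O_DIRECT);
 *      posix_memalign(&buf, 4096, 1048576);
 *      pread(fd, buf, 1048576, 0);     served by nfs_file_direct_read()
 *      pwrite(fd, buf, 1048576, 0);    served by nfs_file_direct_write()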
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files. Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 * 04 May 2005  support O_DIRECT with aio  --cel
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#define NFSDBG_FACILITY         NFSDBG_VFS
#define MAX_DIRECTIO_SIZE       (4096UL << PAGE_SHIFT)
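/*
 * MAX_DIRECTIO_SIZE allows at most 4096 pages per direct I/O request;
 * with 4 KiB pages (PAGE_SHIFT == 12) that works out to
 * 4096 * 4096 bytes = 16 MiB, and the limit scales with the
 * architecture's page size.
 */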
static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);
static kmem_cache_t *nfs_direct_cachep;
/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
        struct kref             kref;           /* release manager */
        struct list_head        list;           /* nfs_read/write_data structs */
        struct file *           filp;           /* file descriptor */
        struct kiocb *          iocb;           /* controlling i/o request */
        wait_queue_head_t       wait;           /* wait for i/o completion */
        struct inode *          inode;          /* target file of i/o */
        struct page **          pages;          /* pages in our buffer */
        unsigned int            npages;         /* count of pages */
        atomic_t                complete,       /* i/os we're waiting for */
                                count,          /* bytes actually processed */
                                error;          /* any reported error */
};
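/*
 * Rough lifecycle, inferred from the code below: a dreq is created with
 * one kref held for the waiter, and a second reference is taken while the
 * RPCs are in flight. "complete" counts outstanding requests; each
 * completion decrements it, and the last one either wakes the synchronous
 * waiter or calls aio_complete() for async requests, then drops its
 * reference.
 */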
/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
        struct dentry *dentry = iocb->ki_filp->f_dentry;

        dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
                dentry->d_name.name, (long long) pos, nr_segs);
static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
        int result = -ENOMEM;
        unsigned long page_count;

        /* set an arbitrary limit to prevent type overflow */
        if (size > MAX_DIRECTIO_SIZE) {

        page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        page_count -= user_addr >> PAGE_SHIFT;
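        /*
         * Worked example: with 4 KiB pages, user_addr = 0x10ff0 and
         * size = 8192 give (0x10ff0 + 0x2000 + 0xfff) >> 12 = 0x13 and
         * 0x10ff0 >> 12 = 0x10, so the buffer spans page_count = 3 pages.
         */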
        array_size = (page_count * sizeof(struct page *));
        *pages = kmalloc(array_size, GFP_KERNEL);

        down_read(&current->mm->mmap_sem);
        result = get_user_pages(current, current->mm, user_addr,
                                page_count, (rw == READ), 0,
                                *pages, NULL);
        up_read(&current->mm->mmap_sem);

        /*
         * If we got fewer pages than expected from get_user_pages(),
         * the user buffer runs off the end of a mapping; return EFAULT.
         */
        if (result >= 0 && result < page_count) {
                nfs_free_user_pages(*pages, result, 0);
static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
        for (i = 0; i < npages; i++) {
                struct page *page = pages[i];
                if (do_dirty && !PageCompound(page))
                        set_page_dirty_lock(page);
                page_cache_release(page);
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);

        kref_init(&dreq->kref);
        init_waitqueue_head(&dreq->wait);
        INIT_LIST_HEAD(&dreq->list);
        atomic_set(&dreq->count, 0);
        atomic_set(&dreq->error, 0);

static void nfs_direct_req_release(struct kref *kref)
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
        kmem_cache_free(nfs_direct_cachep, dreq);
/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
        int result = -EIOCBQUEUED;

        /* Async requests don't wait here */
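        /*
         * (Async callers get the -EIOCBQUEUED value back right away; the
         * final byte count or error reaches them later through
         * aio_complete() in nfs_direct_complete().)
         */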
        result = wait_event_interruptible(dreq->wait,
                        (atomic_read(&dreq->complete) == 0));

        if (!result)
                result = atomic_read(&dreq->error);
        if (!result)
                result = atomic_read(&dreq->count);

        kref_put(&dreq->kref, nfs_direct_req_release);
        return (ssize_t) result;
/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete. This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 *
 * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
 * can't trust the iocb is still valid here if this is a synchronous
 * request. If the waiter is woken prematurely, the iocb is long gone.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
        nfs_free_user_pages(dreq->pages, dreq->npages, 1);

        if (dreq->iocb) {
                long res = atomic_read(&dreq->error);
                if (!res)
                        res = atomic_read(&dreq->count);
                aio_complete(dreq->iocb, res, 0);
        } else
                wake_up(&dreq->wait);

        kref_put(&dreq->kref, nfs_direct_req_release);
/*
 * Note we also set the number of requests we have in the dreq when we are
 * done. This prevents races with I/O completion so we will always wait
 * until all requests have been dispatched and completed.
 */
static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
        struct list_head *list;
        struct nfs_direct_req *dreq;
        unsigned int reads = 0;
        unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
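        /*
         * Example with illustrative numbers: a 128 KiB request against a
         * server rsize of 32 KiB needs four nfs_read_data structs, each
         * covering at most rpages = 8 pages on a client with 4 KiB pages.
         */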
        dreq = nfs_direct_req_alloc();

                struct nfs_read_data *data = nfs_readdata_alloc(rpages);

                if (unlikely(!data)) {
                        while (!list_empty(list)) {
                                data = list_entry(list->next,
                                                struct nfs_read_data, pages);
                                list_del(&data->pages);
                                nfs_readdata_free(data);
                        }
                        kref_put(&dreq->kref, nfs_direct_req_release);

                INIT_LIST_HEAD(&data->pages);
                list_add(&data->pages, list);

                data->req = (struct nfs_page *) dreq;

        kref_get(&dreq->kref);
        atomic_set(&dreq->complete, reads);
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
        struct nfs_read_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        if (nfs_readpage_result(task, data) != 0)
                return;
        if (likely(task->tk_status >= 0))
                atomic_add(data->res.count, &dreq->count);
        else
                atomic_set(&dreq->error, task->tk_status);

        if (unlikely(atomic_dec_and_test(&dreq->complete)))
                nfs_direct_complete(dreq);

static const struct rpc_call_ops nfs_read_direct_ops = {
        .rpc_call_done = nfs_direct_read_result,
        .rpc_release = nfs_readdata_release,
};
/*
 * For each nfs_read_data struct that was allocated on the list, dispatch
 * an NFS READ operation
 */
static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
        struct file *file = dreq->filp;
        struct inode *inode = file->f_mapping->host;
        struct nfs_open_context *ctx = (struct nfs_open_context *)
                        file->private_data;
        struct list_head *list = &dreq->list;
        struct page **pages = dreq->pages;
        size_t rsize = NFS_SERVER(inode)->rsize;
        unsigned int curpage, pgbase;

        pgbase = user_addr & ~PAGE_MASK;
        do {
                struct nfs_read_data *data;

                data = list_entry(list->next, struct nfs_read_data, pages);
                list_del_init(&data->pages);

                data->cred = ctx->cred;
                data->args.fh = NFS_FH(inode);
                data->args.context = ctx;
                data->args.offset = pos;
                data->args.pgbase = pgbase;
                data->args.pages = &pages[curpage];
                data->args.count = bytes;
                data->res.fattr = &data->fattr;
                data->res.count = bytes;

                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_read_direct_ops, data);
                NFS_PROTO(inode)->read_setup(data);

                data->task.tk_cookie = (unsigned long) inode;

                rpc_execute(&data->task);

                dfprintk(VFS, "NFS: %4d initiated direct read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
                                (long long)NFS_FILEID(inode),
                                (unsigned long long)data->args.offset);

                curpage += pgbase >> PAGE_SHIFT;
                pgbase &= ~PAGE_MASK;

        } while (count != 0);
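        /*
         * Illustration of the chunk bookkeeping above (assuming pgbase is
         * advanced by the bytes just issued, in lines elided here): a pass
         * that starts at pgbase = 3072 and sends bytes = 4096 leaves
         * pgbase = 7168, so curpage advances by 7168 >> 12 = 1 page and
         * pgbase wraps to 7168 & 4095 = 3072 for the next RPC.
         */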
static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;

        dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);

        dreq->npages = nr_pages;
        dreq->pages = pages;
        dreq->filp = iocb->ki_filp;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
        rpc_clnt_sigmask(clnt, &oldset);
        nfs_direct_read_schedule(dreq, user_addr, count, pos);
        result = nfs_direct_wait(dreq);
        rpc_clnt_sigunmask(clnt, &oldset);
static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
        struct list_head *list;
        struct nfs_direct_req *dreq;
        unsigned int writes = 0;
        unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        dreq = nfs_direct_req_alloc();

                struct nfs_write_data *data = nfs_writedata_alloc(wpages);

                if (unlikely(!data)) {
                        while (!list_empty(list)) {
                                data = list_entry(list->next,
                                                struct nfs_write_data, pages);
                                list_del(&data->pages);
                                nfs_writedata_free(data);
                        }
                        kref_put(&dreq->kref, nfs_direct_req_release);

                INIT_LIST_HEAD(&data->pages);
                list_add(&data->pages, list);

                data->req = (struct nfs_page *) dreq;

        kref_get(&dreq->kref);
        atomic_set(&dreq->complete, writes);
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
        int status = task->tk_status;

        if (nfs_writeback_done(task, data) != 0)
                return;
        /* If the server fell back to an UNSTABLE write, it's an error. */
        if (unlikely(data->res.verf->committed != NFS_FILE_SYNC))

        if (likely(status >= 0))
                atomic_add(data->res.count, &dreq->count);
        else
                atomic_set(&dreq->error, status);

        if (unlikely(atomic_dec_and_test(&dreq->complete))) {
                nfs_end_data_update(data->inode);
                nfs_direct_complete(dreq);
        }

static const struct rpc_call_ops nfs_write_direct_ops = {
        .rpc_call_done = nfs_direct_write_result,
        .rpc_release = nfs_writedata_release,
};
/*
 * For each nfs_write_data struct that was allocated on the list, dispatch
 * an NFS WRITE operation
 *
 * XXX: For now, support only FILE_SYNC writes. Later we may add
 * support for UNSTABLE + COMMIT.
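 *
 * A FILE_SYNC write asks the server to commit the data to stable storage
 * before replying, which is why nfs_direct_write_result() treats any
 * reply weaker than NFS_FILE_SYNC in res.verf->committed as an error:
 * there is no later commit pass in this path to make the data durable.
 */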
static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
        struct file *file = dreq->filp;
        struct inode *inode = file->f_mapping->host;
        struct nfs_open_context *ctx = (struct nfs_open_context *)
                        file->private_data;
        struct list_head *list = &dreq->list;
        struct page **pages = dreq->pages;
        size_t wsize = NFS_SERVER(inode)->wsize;
        unsigned int curpage, pgbase;

        pgbase = user_addr & ~PAGE_MASK;
        do {
                struct nfs_write_data *data;

                data = list_entry(list->next, struct nfs_write_data, pages);
                list_del_init(&data->pages);

                data->cred = ctx->cred;
                data->args.fh = NFS_FH(inode);
                data->args.context = ctx;
                data->args.offset = pos;
                data->args.pgbase = pgbase;
                data->args.pages = &pages[curpage];
                data->args.count = bytes;
                data->res.fattr = &data->fattr;
                data->res.count = bytes;
                data->res.verf = &data->verf;

                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_write_direct_ops, data);
                NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);

                data->task.tk_priority = RPC_PRIORITY_NORMAL;
                data->task.tk_cookie = (unsigned long) inode;

                rpc_execute(&data->task);

                dfprintk(VFS, "NFS: %4d initiated direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
                                (long long)NFS_FILEID(inode),
                                (unsigned long long)data->args.offset);

                curpage += pgbase >> PAGE_SHIFT;
                pgbase &= ~PAGE_MASK;

        } while (count != 0);
static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;

        dreq = nfs_direct_write_alloc(count, NFS_SERVER(inode)->wsize);

        dreq->npages = nr_pages;
        dreq->pages = pages;
        dreq->filp = iocb->ki_filp;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

        nfs_begin_data_update(inode);

        rpc_clnt_sigmask(clnt, &oldset);
        nfs_direct_write_schedule(dreq, user_addr, count, pos);
        result = nfs_direct_wait(dreq);
        rpc_clnt_sigunmask(clnt, &oldset);
/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid its check to see if
 * the request starts before the end of the file. For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change. Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;

        dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
                file->f_dentry->d_parent->d_name.name,
                file->f_dentry->d_name.name,
                (unsigned long) count, (long long) pos);

        if (!access_ok(VERIFY_WRITE, buf, count))

        retval = nfs_sync_mapping(mapping);

        page_count = nfs_get_user_pages(READ, (unsigned long) buf,
                                        count, &pages);
        if (page_count < 0) {
                nfs_free_user_pages(pages, 0, 0);

        retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
                                        pages, page_count);

        if (retval > 0)
                iocb->ki_pos = pos + retval;
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size. The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache. We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;

        dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
                file->f_dentry->d_parent->d_name.name,
                file->f_dentry->d_name.name,
                (unsigned long) count, (long long) pos);

        retval = generic_write_checks(file, &pos, &count, 0);

        if ((ssize_t) count < 0)

        if (!access_ok(VERIFY_READ, buf, count))

        retval = nfs_sync_mapping(mapping);

        page_count = nfs_get_user_pages(WRITE, (unsigned long) buf,
                                        count, &pages);
        if (page_count < 0) {
                nfs_free_user_pages(pages, 0, 0);

        retval = nfs_direct_write(iocb, (unsigned long) buf, count,
                                        pos, pages, page_count);

        /*
         * XXX: nfs_end_data_update() already ensures this file's
         *      cached data is subsequently invalidated. Do we really
         *      need to call invalidate_inode_pages2() again here?
         *
         *      For aio writes, this invalidation will almost certainly
         *      occur before the writes complete. Kind of racy.
         */
        if (mapping->nrpages)
                invalidate_inode_pages2(mapping);

        if (retval > 0)
                iocb->ki_pos = pos + retval;
/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
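 *
 * Presumably invoked once while the NFS client is initialized and paired
 * with nfs_destroy_directcache() at teardown; the call sites are outside
 * this file.
 */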
int nfs_init_directcache(void)
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                        sizeof(struct nfs_direct_req),
                                        0, SLAB_RECLAIM_ACCOUNT,
                                        NULL, NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;
/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 */
void nfs_destroy_directcache(void)
        if (kmem_cache_destroy(nfs_direct_cachep))
                printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");