// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <[email protected]>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 * 04 May 2005  support O_DIRECT with aio  --cel
 *
 */

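/*
 * Application-side sketch (illustrative only, not kernel code): a file
 * opened with O_DIRECT bypasses the page cache, so its reads and
 * writes reach nfs_file_direct_read() and nfs_file_direct_write()
 * below.
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);	-- align the user buffer
 *	pread(fd, buf, 4096, 0);		-- serviced without caching
 */
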
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY         NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

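/*
 * dreq->io_count gates completion: every in-flight piece of I/O holds
 * a reference taken with get_dreq(), and the caller that sees
 * put_dreq() return true is the one responsible for completing the
 * request.
 */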
static inline void get_dreq(struct nfs_direct_req *dreq)
{
        atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
        return atomic_dec_and_test(&dreq->io_count);
}

static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
                            const struct nfs_pgio_header *hdr,
                            ssize_t dreq_len)
{
        if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
              test_bit(NFS_IOHDR_EOF, &hdr->flags)))
                return;
        if (dreq->max_count >= dreq_len) {
                dreq->max_count = dreq_len;
                if (dreq->count > dreq_len)
                        dreq->count = dreq_len;
        }

        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
                dreq->error = hdr->error;
}

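/*
 * Fold the bytes accounted for by one completed pgio header into the
 * running dreq byte count.  On an error or EOF the request is
 * truncated: max_count shrinks to the verified length, so a later
 * completion cannot extend the result past the failure point.
 */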
static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
                       const struct nfs_pgio_header *hdr)
{
        loff_t hdr_end = hdr->io_start + hdr->good_bytes;
        ssize_t dreq_len = 0;

        if (hdr_end > dreq->io_start)
                dreq_len = hdr_end - dreq->io_start;

        nfs_direct_handle_truncated(dreq, hdr, dreq_len);

        if (dreq_len > dreq->max_count)
                dreq_len = dreq->max_count;

        if (dreq->count < dreq_len)
                dreq->count = dreq_len;
}

static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
                                        struct nfs_page *req)
{
        loff_t offs = req_offset(req);
        size_t req_start = (size_t)(offs - dreq->io_start);

        if (req_start < dreq->max_count)
                dreq->max_count = req_start;
        if (req_start < dreq->count)
                dreq->count = req_start;
}

/**
 * nfs_swap_rw - NFS address space operation for swap I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * Perform IO to the swap-file.  This is much like direct IO.
 */
int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
        ssize_t ret;

        if (iov_iter_rw(iter) == READ)
                ret = nfs_file_direct_read(iocb, iter, true);
        else
                ret = nfs_file_direct_write(iocb, iter, true);
        if (ret < 0)
                return ret;
        return 0;
}
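
/*
 * nfs_swap_rw is wired up as the ->swap_rw address_space operation
 * for NFS files (see nfs_file_aops in fs/nfs/file.c), so swapping to
 * an NFS-backed swapfile reuses the direct I/O paths in this file.
 */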

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
        unsigned int i;
        for (i = 0; i < npages; i++)
                put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq)
{
        cinfo->inode = dreq->inode;
        cinfo->mds = &dreq->mds_cinfo;
        cinfo->ds = &dreq->ds_cinfo;
        cinfo->dreq = dreq;
        cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;

        kref_init(&dreq->kref);
        kref_get(&dreq->kref);
        init_completion(&dreq->completion);
        INIT_LIST_HEAD(&dreq->mds_cinfo.list);
        pnfs_init_ds_commit_info(&dreq->ds_cinfo);
        INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
        spin_lock_init(&dreq->lock);

        return dreq;
}
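
/*
 * Note that nfs_direct_req_alloc() returns a dreq holding two
 * references (kref_init() plus the explicit kref_get()): one is
 * dropped when the I/O completes via nfs_direct_complete(), the
 * other by the issuing caller's final nfs_direct_req_release().
 */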

static void nfs_direct_req_free(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
        if (dreq->l_ctx != NULL)
                nfs_put_lock_context(dreq->l_ctx);
        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
        kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset)
{
        loff_t start = offset - dreq->io_start;
        return dreq->max_count - start;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
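
/*
 * Worked example: for a request that started at io_start = 0 with
 * max_count = 1MB, nfs_dreq_bytes_left(dreq, 256 * 1024) returns
 * 768KB, the span still covered by the request from that offset on.
 */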

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_for_completion_killable(&dreq->completion);

        if (!result) {
                result = dreq->count;
                WARN_ON_ONCE(dreq->count < 0);
        }
        if (!result)
                result = dreq->error;

out:
        return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
        struct inode *inode = dreq->inode;

        inode_dio_end(inode);

        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (dreq->count != 0) {
                        res = (long) dreq->count;
                        WARN_ON_ONCE(dreq->count < 0);
                }
                dreq->iocb->ki_complete(dreq->iocb, res);
        }

        complete(&dreq->completion);

        nfs_direct_req_release(dreq);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;
        struct nfs_direct_req *dreq = hdr->dreq;

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }

        nfs_direct_count_bytes(dreq, hdr);
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;

                if (!PageCompound(page) && bytes < hdr->good_bytes &&
                    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
out_put:
        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
        get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
        .error_cleanup = nfs_read_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If iov_iter_get_pages_alloc2() or
 * nfs_page_create_from_page() fails, bail and stop sending more
 * reads.  Read length accounting is handled by
 * nfs_direct_read_completion().  Otherwise, if no requests have been
 * sent, just return an error.
 */

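/*
 * Worked example (illustrative, with 4KB pages): given rsize = 64KB
 * and a user buffer starting 1KB into a page,
 * iov_iter_get_pages_alloc2() pins up to 64KB per pass; the first
 * nfs_page covers PAGE_SIZE - pgbase = 3KB of its page, each later
 * request covers a full page, and the outer loop repeats until the
 * iterator drains or an error stops it.
 */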
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                                              struct iov_iter *iter,
                                              loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

        nfs_pageio_init_read(&desc, dreq->inode, false,
                             &nfs_direct_read_completion_ops);
        get_dreq(dreq);
        desc.pg_dreq = dreq;
        inode_dio_begin(inode);

        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc2(iter, &pagevec,
                                                  rsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
                        /* XXX do we need to do the eof zeroing found in async_filler? */
                        req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
                                                        pgbase, pos, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }

        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        return requested_bytes;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
                             bool swap)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        ssize_t result, requested;
        size_t count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

        dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
                file, count, (long long) iocb->ki_pos);

        result = 0;
        if (!count)
                goto out;

        task_io_account_read(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (dreq == NULL)
                goto out;

        dreq->inode = inode;
        dreq->max_count = count;
        dreq->io_start = iocb->ki_pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                nfs_direct_req_release(dreq);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        if (user_backed_iter(iter))
                dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

        if (!swap) {
                result = nfs_start_io_direct(inode);
                if (result) {
                        /* release the reference that would usually be
                         * consumed by nfs_direct_read_schedule_iovec()
                         */
                        nfs_direct_req_release(dreq);
                        goto out_release;
                }
        }

        NFS_I(inode)->read_io += count;
        requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

        if (!swap)
                nfs_end_io_direct(inode);

        if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        requested -= result;
                        iocb->ki_pos += result;
                }
                iov_iter_revert(iter, requested);
        } else {
                result = requested;
        }

out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

static void nfs_direct_add_page_head(struct list_head *list,
                                     struct nfs_page *req)
{
        struct nfs_page *head = req->wb_head;

        if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
                return;
        if (!list_empty(&head->wb_list)) {
                nfs_unlock_request(head);
                return;
        }
        list_add(&head->wb_list, list);
        kref_get(&head->wb_kref);
        kref_get(&head->wb_kref);
}

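/*
 * A write being rescheduled may arrive here as a page group: a head
 * request with subrequests chained through wb_this_page.  Re-join
 * each group into a single request, dropping the subrequests, so the
 * data can be resent as one unit.
 */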
static void nfs_direct_join_group(struct list_head *list,
                                  struct nfs_commit_info *cinfo,
                                  struct inode *inode)
{
        struct nfs_page *req, *subreq;

        list_for_each_entry(req, list, wb_list) {
                if (req->wb_head != req) {
                        nfs_direct_add_page_head(&req->wb_list, req);
                        continue;
                }
                subreq = req->wb_this_page;
                if (subreq == req)
                        continue;
                do {
                        /*
                         * Remove subrequests from this list before freeing
                         * them in the call to nfs_join_page_group().
                         */
                        if (!list_empty(&subreq->wb_list)) {
                                nfs_list_remove_request(subreq);
                                nfs_release_request(subreq);
                        }
                } while ((subreq = subreq->wb_this_page) != req);
                nfs_join_page_group(req, cinfo, inode);
        }
}

static void
nfs_direct_write_scan_commit_list(struct inode *inode,
                                  struct list_head *list,
                                  struct nfs_commit_info *cinfo)
{
        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
        pnfs_recover_commit_reqs(list, cinfo);
        nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct nfs_pageio_descriptor desc;
        struct nfs_page *req;
        LIST_HEAD(reqs);
        struct nfs_commit_info cinfo;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

        nfs_direct_join_group(&reqs, &cinfo, dreq->inode);

        nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
        get_dreq(dreq);

        nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;

        while (!list_empty(&reqs)) {
                req = nfs_list_entry(reqs.next);
                /* Bump the transmission count */
                req->wb_nio++;
                if (!nfs_pageio_add_request(&desc, req)) {
                        spin_lock(&dreq->lock);
                        if (dreq->error < 0) {
                                desc.pg_error = dreq->error;
                        } else if (desc.pg_error != -EAGAIN) {
                                dreq->flags = 0;
                                if (!desc.pg_error)
                                        desc.pg_error = -EIO;
                                dreq->error = desc.pg_error;
                        } else
                                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                        spin_unlock(&dreq->lock);
                        break;
                }
                nfs_release_request(req);
        }
        nfs_pageio_complete(&desc);

        while (!list_empty(&reqs)) {
                req = nfs_list_entry(reqs.next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
                if (desc.pg_error == -EAGAIN) {
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                } else {
                        spin_lock(&dreq->lock);
                        nfs_direct_truncate_request(dreq, req);
                        spin_unlock(&dreq->lock);
                        nfs_release_request(req);
                }
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
}

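/*
 * COMMIT completion.  A failed COMMIT is fatal to the dreq, while a
 * write-verifier mismatch means the server rebooted after taking the
 * unstable WRITEs, so those requests are queued to be resent.
 */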
static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
        const struct nfs_writeverf *verf = data->res.verf;
        struct nfs_direct_req *dreq = data->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        int status = data->task.tk_status;

        trace_nfs_direct_commit_complete(dreq);

        spin_lock(&dreq->lock);
        if (status < 0) {
                /* Errors in commit are fatal */
                dreq->error = status;
                dreq->flags = NFS_ODIRECT_DONE;
        } else {
                status = dreq->error;
        }
        spin_unlock(&dreq->lock);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                if (status < 0) {
                        spin_lock(&dreq->lock);
                        nfs_direct_truncate_request(dreq, req);
                        spin_unlock(&dreq->lock);
                        nfs_release_request(req);
                } else if (!nfs_write_match_verf(verf, req)) {
                        spin_lock(&dreq->lock);
                        if (dreq->flags == 0)
                                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                        spin_unlock(&dreq->lock);
                        /*
                         * Despite the reboot, the write was successful,
                         * so reset wb_nio.
                         */
                        req->wb_nio = 0;
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                } else
                        nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (nfs_commit_end(cinfo.mds))
                nfs_direct_write_complete(dreq);
}

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
                struct nfs_page *req)
{
        struct nfs_direct_req *dreq = cinfo->dreq;

        trace_nfs_direct_resched_write(dreq);

        spin_lock(&dreq->lock);
        if (dreq->flags != NFS_ODIRECT_DONE)
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        spin_unlock(&dreq->lock);
        nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
        .completion = nfs_direct_commit_complete,
        .resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        int res;
        struct nfs_commit_info cinfo;
        LIST_HEAD(mds_list);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_commit_begin(cinfo.mds);
        nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
        res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
        if (res < 0) { /* res == -ENOMEM */
                spin_lock(&dreq->lock);
                if (dreq->flags == 0)
                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                spin_unlock(&dreq->lock);
        }
        if (nfs_commit_end(cinfo.mds))
                nfs_direct_write_complete(dreq);
}

static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
{
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        LIST_HEAD(reqs);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

        while (!list_empty(&reqs)) {
                req = nfs_list_entry(reqs.next);
                nfs_list_remove_request(req);
                nfs_direct_truncate_request(dreq, req);
                nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }
}

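/*
 * Deferred completion for direct writes, run from the nfsiod
 * workqueue: depending on the dreq state this either schedules a
 * COMMIT, resends the outstanding writes, or tears down whatever is
 * left and completes the request.
 */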
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
        struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
                case NFS_ODIRECT_DO_COMMIT:
                        nfs_direct_commit_schedule(dreq);
                        break;
                case NFS_ODIRECT_RESCHED_WRITES:
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
                        nfs_direct_write_clear_reqs(dreq);
                        nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
                        nfs_direct_complete(dreq);
        }
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
        trace_nfs_direct_write_complete(dreq);
        queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req = nfs_list_entry(hdr->pages.next);
        int flags = NFS_ODIRECT_DONE;

        trace_nfs_direct_write_completion(dreq);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }

        nfs_direct_count_bytes(dreq, hdr);
        if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
            !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                if (!dreq->flags)
                        dreq->flags = NFS_ODIRECT_DO_COMMIT;
                flags = dreq->flags;
        }
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {

                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
                if (flags == NFS_ODIRECT_DO_COMMIT) {
                        kref_get(&req->wb_kref);
                        memcpy(&req->wb_verf, &hdr->verf.verifier,
                               sizeof(req->wb_verf));
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo,
                                hdr->ds_commit_idx);
                } else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                }
                nfs_unlock_and_release_request(req);
        }

out_put:
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
        hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;
        struct nfs_page *req;
        struct nfs_commit_info cinfo;

        trace_nfs_direct_write_reschedule_io(dreq);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        spin_lock(&dreq->lock);
        if (dreq->error == 0)
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        set_bit(NFS_IOHDR_REDO, &hdr->flags);
        spin_unlock(&dreq->lock);
        while (!list_empty(&hdr->pages)) {
                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
                nfs_unlock_request(req);
                nfs_mark_request_commit(req, NULL, &cinfo, 0);
        }
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
        .error_cleanup = nfs_write_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_write_completion,
        .reschedule_io = nfs_direct_write_reschedule_io,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If iov_iter_get_pages_alloc2() or
 * nfs_page_create_from_page() fails, bail and stop sending more
 * writes.  Write length accounting is handled by
 * nfs_direct_write_completion().  Otherwise, if no requests have been
 * sent, just return an error.
 *
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                               struct iov_iter *iter,
                                               loff_t pos, int ioflags)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        struct nfs_commit_info cinfo;
        ssize_t result = 0;
        size_t requested_bytes = 0;
        size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
        bool defer = false;

        trace_nfs_direct_write_schedule_iovec(dreq);

        nfs_pageio_init_write(&desc, inode, ioflags, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;
        get_dreq(dreq);
        inode_dio_begin(inode);

        NFS_I(inode)->write_io += iov_iter_count(iter);
        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc2(iter, &pagevec,
                                                  wsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

                        req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
                                                        pgbase, pos, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }

                        if (desc.pg_error < 0) {
                                nfs_free_request(req);
                                result = desc.pg_error;
                                break;
                        }

                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;

                        if (defer) {
                                nfs_mark_request_commit(req, NULL, &cinfo, 0);
                                continue;
                        }

                        nfs_lock_request(req);
                        if (nfs_pageio_add_request(&desc, req))
                                continue;

                        /* Exit on hard errors */
                        if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
                                result = desc.pg_error;
                                nfs_unlock_and_release_request(req);
                                break;
                        }

                        /* If the error is soft, defer remaining requests */
                        nfs_init_cinfo_from_dreq(&cinfo, dreq);
                        spin_lock(&dreq->lock);
                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                        spin_unlock(&dreq->lock);
                        nfs_unlock_request(req);
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                        desc.pg_error = 0;
                        defer = true;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }
        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
        return requested_bytes;
}

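/*
 * Illustrative caller-side sketch (not kernel code): a direct write
 * returns only once the data is on stable storage, and O_APPEND is
 * not supported because the NFS protocol has no atomic append:
 *
 *	int fd = open("/mnt/nfs/out", O_WRONLY | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 4096);
 *	pwrite(fd, buf, 4096, 0);	-- completes after data is stable
 */
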
/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
                              bool swap)
{
        ssize_t result, requested;
        size_t count;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        loff_t pos, end;

        dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
                file, iov_iter_count(iter), (long long) iocb->ki_pos);

        if (swap)
                /* bypass generic checks */
                result = iov_iter_count(iter);
        else
                result = generic_write_checks(iocb, iter);
        if (result <= 0)
                return result;
        count = result;
        nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

        pos = iocb->ki_pos;
        end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

        task_io_account_write(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (!dreq)
                goto out;

        dreq->inode = inode;
        dreq->max_count = count;
        dreq->io_start = pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                nfs_direct_req_release(dreq);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
        pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

        if (swap) {
                requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
                                                            FLUSH_STABLE);
        } else {
                result = nfs_start_io_direct(inode);
                if (result) {
                        /* release the reference that would usually be
                         * consumed by nfs_direct_write_schedule_iovec()
                         */
                        nfs_direct_req_release(dreq);
                        goto out_release;
                }

                requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
                                                            FLUSH_COND_STABLE);

                if (mapping->nrpages) {
                        invalidate_inode_pages2_range(mapping,
                                                      pos >> PAGE_SHIFT, end);
                }

                nfs_end_io_direct(inode);
        }

        if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        requested -= result;
                        iocb->ki_pos = pos + result;
                        /* XXX: should check the generic_write_sync retval */
                        generic_write_sync(iocb, result);
                }
                iov_iter_revert(iter, requested);
        } else {
                result = requested;
        }
        nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                                sizeof(struct nfs_direct_req),
                                                0, SLAB_RECLAIM_ACCOUNT,
                                                NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
        kmem_cache_destroy(nfs_direct_cachep);
}