block/fops.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 1991, 1992  Linus Torvalds
4  * Copyright (C) 2001  Andrea Arcangeli <[email protected]> SuSE
5  * Copyright (C) 2016 - 2020 Christoph Hellwig
6  */
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/blkdev.h>
10 #include <linux/buffer_head.h>
11 #include <linux/mpage.h>
12 #include <linux/uio.h>
13 #include <linux/namei.h>
14 #include <linux/task_io_accounting_ops.h>
15 #include <linux/falloc.h>
16 #include <linux/suspend.h>
17 #include <linux/fs.h>
18 #include <linux/iomap.h>
19 #include <linux/module.h>
20 #include "blk.h"
21
22 static inline struct inode *bdev_file_inode(struct file *file)
23 {
24         return file->f_mapping->host;
25 }
26
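/*
 * Build the operation flags for a direct write.  For O_(D)SYNC writes,
 * REQ_FUA makes the data durable as soon as the bio completes, so the
 * iocb can be completed straight from the bio's end_io handler instead
 * of punting a cache flush to a completion work item.
 */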
27 static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
28 {
29         blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
30
31         /* avoid the need for an I/O completion work item */
32         if (iocb_is_dsync(iocb))
33                 opf |= REQ_FUA;
34         return opf;
35 }
36
37 static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
38                               struct iov_iter *iter)
39 {
40         return pos & (bdev_logical_block_size(bdev) - 1) ||
41                 !bdev_iter_is_aligned(bdev, iter);
42 }
43
44 #define DIO_INLINE_BIO_VECS 4
45
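/*
 * Fast path for synchronous I/O that fits in a single bio: the bio lives on
 * the stack, and requests with at most DIO_INLINE_BIO_VECS segments avoid
 * allocating a bio_vec array as well.
 */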
46 static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
47                 struct iov_iter *iter, struct block_device *bdev,
48                 unsigned int nr_pages)
49 {
50         struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
51         loff_t pos = iocb->ki_pos;
52         bool should_dirty = false;
53         struct bio bio;
54         ssize_t ret;
55
56         if (nr_pages <= DIO_INLINE_BIO_VECS)
57                 vecs = inline_vecs;
58         else {
59                 vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
60                                      GFP_KERNEL);
61                 if (!vecs)
62                         return -ENOMEM;
63         }
64
65         if (iov_iter_rw(iter) == READ) {
66                 bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
67                 if (user_backed_iter(iter))
68                         should_dirty = true;
69         } else {
70                 bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
71         }
72         bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
73         bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
74         bio.bi_ioprio = iocb->ki_ioprio;
75
76         ret = bio_iov_iter_get_pages(&bio, iter);
77         if (unlikely(ret))
78                 goto out;
79         ret = bio.bi_iter.bi_size;
80
81         if (iov_iter_rw(iter) == WRITE)
82                 task_io_account_write(ret);
83
84         if (iocb->ki_flags & IOCB_NOWAIT)
85                 bio.bi_opf |= REQ_NOWAIT;
86
87         submit_bio_wait(&bio);
88
89         bio_release_pages(&bio, should_dirty);
90         if (unlikely(bio.bi_status))
91                 ret = blk_status_to_errno(bio.bi_status);
92
93 out:
94         if (vecs != inline_vecs)
95                 kfree(vecs);
96
97         bio_uninit(&bio);
98
99         return ret;
100 }
101
102 enum {
103         DIO_SHOULD_DIRTY        = 1,
104         DIO_IS_SYNC             = 2,
105 };
106
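/*
 * Completion state shared by the bios of one direct I/O request.  The first
 * bio is embedded and allocated from blkdev_dio_pool; for the multi-bio path
 * 'ref' counts the bios still in flight, and the union holds either the
 * iocb to complete (async) or the task to wake (sync).
 */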
107 struct blkdev_dio {
108         union {
109                 struct kiocb            *iocb;
110                 struct task_struct      *waiter;
111         };
112         size_t                  size;
113         atomic_t                ref;
114         unsigned int            flags;
115         struct bio              bio ____cacheline_aligned_in_smp;
116 };
117
118 static struct bio_set blkdev_dio_pool;
119
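/*
 * Completion handler for the multi-bio path.  The first error seen is
 * recorded in the embedded first bio; the bio that drops the last reference
 * either completes the iocb (async) or wakes the waiting task (sync).
 */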
120 static void blkdev_bio_end_io(struct bio *bio)
121 {
122         struct blkdev_dio *dio = bio->bi_private;
123         bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;
124
125         if (bio->bi_status && !dio->bio.bi_status)
126                 dio->bio.bi_status = bio->bi_status;
127
128         if (atomic_dec_and_test(&dio->ref)) {
129                 if (!(dio->flags & DIO_IS_SYNC)) {
130                         struct kiocb *iocb = dio->iocb;
131                         ssize_t ret;
132
133                         WRITE_ONCE(iocb->private, NULL);
134
135                         if (likely(!dio->bio.bi_status)) {
136                                 ret = dio->size;
137                                 iocb->ki_pos += ret;
138                         } else {
139                                 ret = blk_status_to_errno(dio->bio.bi_status);
140                         }
141
142                         dio->iocb->ki_complete(iocb, ret);
143                         bio_put(&dio->bio);
144                 } else {
145                         struct task_struct *waiter = dio->waiter;
146
147                         WRITE_ONCE(dio->waiter, NULL);
148                         blk_wake_io_task(waiter);
149                 }
150         }
151
152         if (should_dirty) {
153                 bio_check_pages_dirty(bio);
154         } else {
155                 bio_release_pages(bio, false);
156                 bio_put(bio);
157         }
158 }
159
160 static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
161                 struct block_device *bdev, unsigned int nr_pages)
162 {
163         struct blk_plug plug;
164         struct blkdev_dio *dio;
165         struct bio *bio;
166         bool is_read = (iov_iter_rw(iter) == READ), is_sync;
167         blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
168         loff_t pos = iocb->ki_pos;
169         int ret = 0;
170
171         if (iocb->ki_flags & IOCB_ALLOC_CACHE)
172                 opf |= REQ_ALLOC_CACHE;
173         bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
174                                &blkdev_dio_pool);
175         dio = container_of(bio, struct blkdev_dio, bio);
176         atomic_set(&dio->ref, 1);
177         /*
178          * Grab an extra reference to ensure that the dio structure, which is
179          * embedded into the first bio, stays around.
180          */
181         bio_get(bio);
182
183         is_sync = is_sync_kiocb(iocb);
184         if (is_sync) {
185                 dio->flags = DIO_IS_SYNC;
186                 dio->waiter = current;
187         } else {
188                 dio->flags = 0;
189                 dio->iocb = iocb;
190         }
191
192         dio->size = 0;
193         if (is_read && user_backed_iter(iter))
194                 dio->flags |= DIO_SHOULD_DIRTY;
195
196         blk_start_plug(&plug);
197
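        /*
         * Map as much of the iterator as fits into the current bio, submit
         * it, and keep allocating and submitting bios until the iterator is
         * drained.  Each additional in-flight bio takes a reference on the
         * dio.
         */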
198         for (;;) {
199                 bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
200                 bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
201                 bio->bi_private = dio;
202                 bio->bi_end_io = blkdev_bio_end_io;
203                 bio->bi_ioprio = iocb->ki_ioprio;
204
205                 ret = bio_iov_iter_get_pages(bio, iter);
206                 if (unlikely(ret)) {
207                         bio->bi_status = BLK_STS_IOERR;
208                         bio_endio(bio);
209                         break;
210                 }
211                 if (iocb->ki_flags & IOCB_NOWAIT) {
212                         /*
213                          * This is nonblocking IO, and we need to allocate
214                          * another bio if we have data left to map. As we
215                          * cannot guarantee that one of the sub bios will not
216                          * fail to be issued under REQ_NOWAIT, and as errors
217                          * are coalesced across all of them, be safe and ask
218                          * for a retry of this from blocking context.
219                          */
220                         if (unlikely(iov_iter_count(iter))) {
221                                 bio_release_pages(bio, false);
222                                 bio_clear_flag(bio, BIO_REFFED);
223                                 bio_put(bio);
224                                 blk_finish_plug(&plug);
225                                 return -EAGAIN;
226                         }
227                         bio->bi_opf |= REQ_NOWAIT;
228                 }
229
230                 if (is_read) {
231                         if (dio->flags & DIO_SHOULD_DIRTY)
232                                 bio_set_pages_dirty(bio);
233                 } else {
234                         task_io_account_write(bio->bi_iter.bi_size);
235                 }
236                 dio->size += bio->bi_iter.bi_size;
237                 pos += bio->bi_iter.bi_size;
238
239                 nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
240                 if (!nr_pages) {
241                         submit_bio(bio);
242                         break;
243                 }
244                 atomic_inc(&dio->ref);
245                 submit_bio(bio);
246                 bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
247         }
248
249         blk_finish_plug(&plug);
250
251         if (!is_sync)
252                 return -EIOCBQUEUED;
253
254         for (;;) {
255                 set_current_state(TASK_UNINTERRUPTIBLE);
256                 if (!READ_ONCE(dio->waiter))
257                         break;
258                 blk_io_schedule();
259         }
260         __set_current_state(TASK_RUNNING);
261
262         if (!ret)
263                 ret = blk_status_to_errno(dio->bio.bi_status);
264         if (likely(!ret))
265                 ret = dio->size;
266
267         bio_put(&dio->bio);
268         return ret;
269 }
270
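/*
 * Completion handler for the single-bio async path: there is no reference
 * counting to do, so complete the iocb directly and release the pages.
 */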
271 static void blkdev_bio_end_io_async(struct bio *bio)
272 {
273         struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
274         struct kiocb *iocb = dio->iocb;
275         ssize_t ret;
276
277         WRITE_ONCE(iocb->private, NULL);
278
279         if (likely(!bio->bi_status)) {
280                 ret = dio->size;
281                 iocb->ki_pos += ret;
282         } else {
283                 ret = blk_status_to_errno(bio->bi_status);
284         }
285
286         iocb->ki_complete(iocb, ret);
287
288         if (dio->flags & DIO_SHOULD_DIRTY) {
289                 bio_check_pages_dirty(bio);
290         } else {
291                 bio_release_pages(bio, false);
292                 bio_put(bio);
293         }
294 }
295
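/*
 * Async fast path for requests that fit in a single bio.  bvec-backed
 * iterators are attached directly without pinning pages, and IOCB_HIPRI
 * requests are marked REQ_POLLED and exposed via iocb->private for
 * iocb_bio_iopoll().
 */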
296 static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
297                                         struct iov_iter *iter,
298                                         struct block_device *bdev,
299                                         unsigned int nr_pages)
300 {
301         bool is_read = iov_iter_rw(iter) == READ;
302         blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
303         struct blkdev_dio *dio;
304         struct bio *bio;
305         loff_t pos = iocb->ki_pos;
306         int ret = 0;
307
308         if (iocb->ki_flags & IOCB_ALLOC_CACHE)
309                 opf |= REQ_ALLOC_CACHE;
310         bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
311                                &blkdev_dio_pool);
312         dio = container_of(bio, struct blkdev_dio, bio);
313         dio->flags = 0;
314         dio->iocb = iocb;
315         bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
316         bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
317         bio->bi_end_io = blkdev_bio_end_io_async;
318         bio->bi_ioprio = iocb->ki_ioprio;
319
320         if (iov_iter_is_bvec(iter)) {
321                 /*
322                  * Users don't rely on the iterator being in any particular
323                  * state for async I/O returning -EIOCBQUEUED, hence we can
324                  * avoid expensive iov_iter_advance(). Bypass
325                  * bio_iov_iter_get_pages() and set the bvec directly.
326                  */
327                 bio_iov_bvec_set(bio, iter);
328         } else {
329                 ret = bio_iov_iter_get_pages(bio, iter);
330                 if (unlikely(ret)) {
331                         bio_put(bio);
332                         return ret;
333                 }
334         }
335         dio->size = bio->bi_iter.bi_size;
336
337         if (is_read) {
338                 if (user_backed_iter(iter)) {
339                         dio->flags |= DIO_SHOULD_DIRTY;
340                         bio_set_pages_dirty(bio);
341                 }
342         } else {
343                 task_io_account_write(bio->bi_iter.bi_size);
344         }
345
346         if (iocb->ki_flags & IOCB_NOWAIT)
347                 bio->bi_opf |= REQ_NOWAIT;
348
349         if (iocb->ki_flags & IOCB_HIPRI) {
350                 bio->bi_opf |= REQ_POLLED;
351                 submit_bio(bio);
352                 WRITE_ONCE(iocb->private, bio);
353         } else {
354                 submit_bio(bio);
355         }
356         return -EIOCBQUEUED;
357 }
358
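/*
 * Dispatch direct I/O to one of three implementations: requests that fit in
 * a single bio use the on-stack (sync) or single-bio async fast path, and
 * anything larger falls back to the multi-bio loop.  Asking for at most
 * BIO_MAX_VECS + 1 segments is enough to tell the two cases apart without
 * walking the whole iterator.
 */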
359 static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
360 {
361         struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
362         unsigned int nr_pages;
363
364         if (!iov_iter_count(iter))
365                 return 0;
366
367         if (blkdev_dio_unaligned(bdev, iocb->ki_pos, iter))
368                 return -EINVAL;
369
370         nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
371         if (likely(nr_pages <= BIO_MAX_VECS)) {
372                 if (is_sync_kiocb(iocb))
373                         return __blkdev_direct_IO_simple(iocb, iter, bdev,
374                                                         nr_pages);
375                 return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
376         }
377         return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
378 }
379
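/*
 * The block device is mapped 1:1, so report a single contiguous extent from
 * the (logical block aligned) offset to the end of the device.  Offsets at
 * or beyond the device size fail with -EIO.
 */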
380 static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
381                 unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
382 {
383         struct block_device *bdev = I_BDEV(inode);
384         loff_t isize = i_size_read(inode);
385
386         iomap->bdev = bdev;
387         iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
388         if (offset >= isize)
389                 return -EIO;
390         iomap->type = IOMAP_MAPPED;
391         iomap->addr = iomap->offset;
392         iomap->length = isize - iomap->offset;
393         iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
394         return 0;
395 }
396
397 static const struct iomap_ops blkdev_iomap_ops = {
398         .iomap_begin            = blkdev_iomap_begin,
399 };
400
401 #ifdef CONFIG_BUFFER_HEAD
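/*
 * The block device's page cache is mapped 1:1 onto the device, so the
 * logical block number is already the on-disk block number.
 */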
402 static int blkdev_get_block(struct inode *inode, sector_t iblock,
403                 struct buffer_head *bh, int create)
404 {
405         bh->b_bdev = I_BDEV(inode);
406         bh->b_blocknr = iblock;
407         set_buffer_mapped(bh);
408         return 0;
409 }
410
411 /*
412  * We cannot call mpage_writepages() as it does not take the buffer lock.
413  * We must use block_write_full_folio() directly which holds the buffer
414  * lock.  The buffer lock provides the synchronisation with writeback
415  * that filesystems rely on when they use the blockdev's mapping.
416  */
417 static int blkdev_writepages(struct address_space *mapping,
418                 struct writeback_control *wbc)
419 {
420         struct blk_plug plug;
421         int err;
422
423         blk_start_plug(&plug);
424         err = write_cache_pages(mapping, wbc, block_write_full_folio,
425                         blkdev_get_block);
426         blk_finish_plug(&plug);
427
428         return err;
429 }
430
431 static int blkdev_read_folio(struct file *file, struct folio *folio)
432 {
433         return block_read_full_folio(folio, blkdev_get_block);
434 }
435
436 static void blkdev_readahead(struct readahead_control *rac)
437 {
438         mpage_readahead(rac, blkdev_get_block);
439 }
440
441 static int blkdev_write_begin(struct file *file, struct address_space *mapping,
442                 loff_t pos, unsigned len, struct page **pagep, void **fsdata)
443 {
444         return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
445 }
446
447 static int blkdev_write_end(struct file *file, struct address_space *mapping,
448                 loff_t pos, unsigned len, unsigned copied, struct page *page,
449                 void *fsdata)
450 {
451         int ret;
452         ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
453
454         unlock_page(page);
455         put_page(page);
456
457         return ret;
458 }
459
460 const struct address_space_operations def_blk_aops = {
461         .dirty_folio    = block_dirty_folio,
462         .invalidate_folio = block_invalidate_folio,
463         .read_folio     = blkdev_read_folio,
464         .readahead      = blkdev_readahead,
465         .writepages     = blkdev_writepages,
466         .write_begin    = blkdev_write_begin,
467         .write_end      = blkdev_write_end,
468         .migrate_folio  = buffer_migrate_folio_norefs,
469         .is_dirty_writeback = buffer_check_dirty_writeback,
470 };
471 #else /* CONFIG_BUFFER_HEAD */
472 static int blkdev_read_folio(struct file *file, struct folio *folio)
473 {
474         return iomap_read_folio(folio, &blkdev_iomap_ops);
475 }
476
477 static void blkdev_readahead(struct readahead_control *rac)
478 {
479         iomap_readahead(rac, &blkdev_iomap_ops);
480 }
481
482 static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
483                 struct inode *inode, loff_t offset, unsigned int len)
484 {
485         loff_t isize = i_size_read(inode);
486
487         if (WARN_ON_ONCE(offset >= isize))
488                 return -EIO;
489         if (offset >= wpc->iomap.offset &&
490             offset < wpc->iomap.offset + wpc->iomap.length)
491                 return 0;
492         return blkdev_iomap_begin(inode, offset, isize - offset,
493                                   IOMAP_WRITE, &wpc->iomap, NULL);
494 }
495
496 static const struct iomap_writeback_ops blkdev_writeback_ops = {
497         .map_blocks             = blkdev_map_blocks,
498 };
499
500 static int blkdev_writepages(struct address_space *mapping,
501                 struct writeback_control *wbc)
502 {
503         struct iomap_writepage_ctx wpc = { };
504
505         return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
506 }
507
508 const struct address_space_operations def_blk_aops = {
509         .dirty_folio    = filemap_dirty_folio,
510         .release_folio          = iomap_release_folio,
511         .invalidate_folio       = iomap_invalidate_folio,
512         .read_folio             = blkdev_read_folio,
513         .readahead              = blkdev_readahead,
514         .writepages             = blkdev_writepages,
515         .is_partially_uptodate  = iomap_is_partially_uptodate,
516         .error_remove_folio     = generic_error_remove_folio,
517         .migrate_folio          = filemap_migrate_folio,
518 };
519 #endif /* CONFIG_BUFFER_HEAD */
520
521 /*
522  * For a block special file, file_inode(file)->i_size is zero,
523  * so we compute the size by hand (just as in block_read/write above).
524  */
525 static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
526 {
527         struct inode *bd_inode = bdev_file_inode(file);
528         loff_t retval;
529
530         inode_lock(bd_inode);
531         retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
532         inode_unlock(bd_inode);
533         return retval;
534 }
535
536 static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
537                 int datasync)
538 {
539         struct block_device *bdev = I_BDEV(filp->f_mapping->host);
540         int error;
541
542         error = file_write_and_wait_range(filp, start, end);
543         if (error)
544                 return error;
545
546         /*
547          * There is no need to serialise calls to blkdev_issue_flush with
548          * i_mutex and doing so causes performance issues with concurrent
549          * O_SYNC writers to a block device.
550          */
551         error = blkdev_issue_flush(bdev);
552         if (error == -EOPNOTSUPP)
553                 error = 0;
554
555         return error;
556 }
557
558 /**
559  * file_to_blk_mode - get block open flags from file flags
560  * @file: file whose open flags should be converted
561  *
562  * Look at file open flags and generate corresponding block open flags from
563  * them. The function works both for a file that is just being opened (e.g.
564  * during the ->open callback) and for a file that is already open. This is
565  * actually non-trivial (see the comment in the function).
566  */
567 blk_mode_t file_to_blk_mode(struct file *file)
568 {
569         blk_mode_t mode = 0;
570
571         if (file->f_mode & FMODE_READ)
572                 mode |= BLK_OPEN_READ;
573         if (file->f_mode & FMODE_WRITE)
574                 mode |= BLK_OPEN_WRITE;
575         /*
576          * do_dentry_open() clears O_EXCL from f_flags, use file->private_data
577          * to determine whether the open was exclusive for already open files.
578          */
579         if (file->private_data)
580                 mode |= BLK_OPEN_EXCL;
581         else if (file->f_flags & O_EXCL)
582                 mode |= BLK_OPEN_EXCL;
583         if (file->f_flags & O_NDELAY)
584                 mode |= BLK_OPEN_NDELAY;
585
586         /*
587          * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the floppy
588          * driver has historically allowed ioctls as if the file was opened for
589          * writing, but does not allow any actual reads or writes.
590          */
591         if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
592                 mode |= BLK_OPEN_WRITE_IOCTL;
593
594         return mode;
595 }
596
597 static int blkdev_open(struct inode *inode, struct file *filp)
598 {
599         struct block_device *bdev;
600         blk_mode_t mode;
601         int ret;
602
603         mode = file_to_blk_mode(filp);
604         /* Use the file as the holder. */
605         if (mode & BLK_OPEN_EXCL)
606                 filp->private_data = filp;
607         ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
608         if (ret)
609                 return ret;
610
611         bdev = blkdev_get_no_open(inode->i_rdev);
612         if (!bdev)
613                 return -ENXIO;
614
615         ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
616         if (ret)
617                 blkdev_put_no_open(bdev);
618         return ret;
619 }
620
621 static int blkdev_release(struct inode *inode, struct file *filp)
622 {
623         bdev_release(filp);
624         return 0;
625 }
626
627 static ssize_t
628 blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
629 {
630         size_t count = iov_iter_count(from);
631         ssize_t written;
632
633         written = kiocb_invalidate_pages(iocb, count);
634         if (written) {
635                 if (written == -EBUSY)
636                         return 0;
637                 return written;
638         }
639
640         written = blkdev_direct_IO(iocb, from);
641         if (written > 0) {
642                 kiocb_invalidate_post_direct_write(iocb, count);
643                 iocb->ki_pos += written;
644                 count -= written;
645         }
646         if (written != -EIOCBQUEUED)
647                 iov_iter_revert(from, count - iov_iter_count(from));
648         return written;
649 }
650
651 static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
652 {
653         return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops);
654 }
655
656 /*
657  * Write data to the block device.  Only intended for the block device itself
658  * and the raw driver which basically is a fake block device.
659  *
660  * Does not take i_mutex for the write and thus is not for general purpose
661  * use.
662  */
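/*
 * Writes that extend past the end of the device are truncated to the device
 * size and reported as a short write (the iterator is re-expanded before
 * returning); writes starting at or beyond the end fail with -ENOSPC.
 */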
663 static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
664 {
665         struct file *file = iocb->ki_filp;
666         struct inode *bd_inode = bdev_file_inode(file);
667         struct block_device *bdev = I_BDEV(bd_inode);
668         loff_t size = bdev_nr_bytes(bdev);
669         size_t shorted = 0;
670         ssize_t ret;
671
672         if (bdev_read_only(bdev))
673                 return -EPERM;
674
675         if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
676                 return -ETXTBSY;
677
678         if (!iov_iter_count(from))
679                 return 0;
680
681         if (iocb->ki_pos >= size)
682                 return -ENOSPC;
683
684         if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
685                 return -EOPNOTSUPP;
686
687         size -= iocb->ki_pos;
688         if (iov_iter_count(from) > size) {
689                 shorted = iov_iter_count(from) - size;
690                 iov_iter_truncate(from, size);
691         }
692
693         ret = file_update_time(file);
694         if (ret)
695                 return ret;
696
697         if (iocb->ki_flags & IOCB_DIRECT) {
698                 ret = blkdev_direct_write(iocb, from);
699                 if (ret >= 0 && iov_iter_count(from))
700                         ret = direct_write_fallback(iocb, from, ret,
701                                         blkdev_buffered_write(iocb, from));
702         } else {
703                 ret = blkdev_buffered_write(iocb, from);
704         }
705
706         if (ret > 0)
707                 ret = generic_write_sync(iocb, ret);
708         iov_iter_reexpand(from, iov_iter_count(from) + shorted);
709         return ret;
710 }
711
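/*
 * Reads are clamped to the device size (0 is returned at or beyond it).
 * For IOCB_DIRECT, dirty page cache over the range is written back first;
 * any remainder the direct path does not consume is served by buffered
 * filemap_read().
 */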
712 static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
713 {
714         struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
715         loff_t size = bdev_nr_bytes(bdev);
716         loff_t pos = iocb->ki_pos;
717         size_t shorted = 0;
718         ssize_t ret = 0;
719         size_t count;
720
721         if (unlikely(pos + iov_iter_count(to) > size)) {
722                 if (pos >= size)
723                         return 0;
724                 size -= pos;
725                 shorted = iov_iter_count(to) - size;
726                 iov_iter_truncate(to, size);
727         }
728
729         count = iov_iter_count(to);
730         if (!count)
731                 goto reexpand; /* skip atime */
732
733         if (iocb->ki_flags & IOCB_DIRECT) {
734                 ret = kiocb_write_and_wait(iocb, count);
735                 if (ret < 0)
736                         goto reexpand;
737                 file_accessed(iocb->ki_filp);
738
739                 ret = blkdev_direct_IO(iocb, to);
740                 if (ret >= 0) {
741                         iocb->ki_pos += ret;
742                         count -= ret;
743                 }
744                 iov_iter_revert(to, count - iov_iter_count(to));
745                 if (ret < 0 || !count)
746                         goto reexpand;
747         }
748
749         ret = filemap_read(iocb, to, ret);
750
751 reexpand:
752         if (unlikely(shorted))
753                 iov_iter_reexpand(to, iov_iter_count(to) + shorted);
754         return ret;
755 }
756
757 #define BLKDEV_FALLOC_FL_SUPPORTED                                      \
758                 (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
759                  FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
760
761 static long blkdev_fallocate(struct file *file, int mode, loff_t start,
762                              loff_t len)
763 {
764         struct inode *inode = bdev_file_inode(file);
765         struct block_device *bdev = I_BDEV(inode);
766         loff_t end = start + len - 1;
767         loff_t isize;
768         int error;
769
770         /* Fail if we don't recognize the flags. */
771         if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
772                 return -EOPNOTSUPP;
773
774         /* Don't go off the end of the device. */
775         isize = bdev_nr_bytes(bdev);
776         if (start >= isize)
777                 return -EINVAL;
778         if (end >= isize) {
779                 if (mode & FALLOC_FL_KEEP_SIZE) {
780                         len = isize - start;
781                         end = start + len - 1;
782                 } else
783                         return -EINVAL;
784         }
785
786         /*
787          * Don't allow IO that isn't aligned to logical block size.
788          */
789         if ((start | len) & (bdev_logical_block_size(bdev) - 1))
790                 return -EINVAL;
791
792         filemap_invalidate_lock(inode->i_mapping);
793
794         /*
795          * Invalidate the page cache, including dirty pages, for valid
796          * de-allocate mode calls to fallocate().
797          */
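        /*
         * Zero range writes zeroes without deallocating (BLKDEV_ZERO_NOUNMAP).
         * Punch hole requires a device zeroing offload (BLKDEV_ZERO_NOFALLBACK,
         * no fallback to writing zeroes by hand).  Punch hole with
         * FALLOC_FL_NO_HIDE_STALE issues a discard, which may leave stale data.
         */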
798         switch (mode) {
799         case FALLOC_FL_ZERO_RANGE:
800         case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
801                 error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
802                 if (error)
803                         goto fail;
804
805                 error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
806                                              len >> SECTOR_SHIFT, GFP_KERNEL,
807                                              BLKDEV_ZERO_NOUNMAP);
808                 break;
809         case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
810                 error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
811                 if (error)
812                         goto fail;
813
814                 error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
815                                              len >> SECTOR_SHIFT, GFP_KERNEL,
816                                              BLKDEV_ZERO_NOFALLBACK);
817                 break;
818         case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
819                 error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
820                 if (error)
821                         goto fail;
822
823                 error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
824                                              len >> SECTOR_SHIFT, GFP_KERNEL);
825                 break;
826         default:
827                 error = -EOPNOTSUPP;
828         }
829
830  fail:
831         filemap_invalidate_unlock(inode->i_mapping);
832         return error;
833 }
834
835 static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
836 {
837         struct inode *bd_inode = bdev_file_inode(file);
838
839         if (bdev_read_only(I_BDEV(bd_inode)))
840                 return generic_file_readonly_mmap(file, vma);
841
842         return generic_file_mmap(file, vma);
843 }
844
845 const struct file_operations def_blk_fops = {
846         .open           = blkdev_open,
847         .release        = blkdev_release,
848         .llseek         = blkdev_llseek,
849         .read_iter      = blkdev_read_iter,
850         .write_iter     = blkdev_write_iter,
851         .iopoll         = iocb_bio_iopoll,
852         .mmap           = blkdev_mmap,
853         .fsync          = blkdev_fsync,
854         .unlocked_ioctl = blkdev_ioctl,
855 #ifdef CONFIG_COMPAT
856         .compat_ioctl   = compat_blkdev_ioctl,
857 #endif
858         .splice_read    = filemap_splice_read,
859         .splice_write   = iter_file_splice_write,
860         .fallocate      = blkdev_fallocate,
861         .fop_flags      = FOP_BUFFER_RASYNC,
862 };
863
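/*
 * blkdev_dio_pool hands out allocations with the bio embedded at the given
 * offset inside a struct blkdev_dio, so bio_alloc_bioset() callers can
 * container_of() their way back from the bio to the dio.
 */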
864 static __init int blkdev_init(void)
865 {
866         return bioset_init(&blkdev_dio_pool, 4,
867                                 offsetof(struct blkdev_dio, bio),
868                                 BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
869 }
870 module_init(blkdev_init);