// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include "blk.h"
static struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}
static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}
static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
        unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb->ki_flags & IOCB_DSYNC)
                op |= REQ_FUA;
        return op;
}
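/*
 * Usage sketch (illustrative, not part of this file): opening the device
 * with O_DSYNC sets IOCB_DSYNC on each write kiocb, so direct writes take
 * the REQ_FUA path above and need no separate flush for the data; the
 * device node and the 4096-byte aligned buffer are assumptions.
 *
 *      int fd = open("/dev/sdX", O_WRONLY | O_DIRECT | O_DSYNC);
 *      void *buf;
 *      posix_memalign(&buf, 4096, 4096);
 *      pwrite(fd, buf, 4096, 0);       // completes with FUA semantics
 */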
#define DIO_INLINE_BIO_VECS 4
static void blkdev_bio_end_io_simple(struct bio *bio)
{
        struct task_struct *waiter = bio->bi_private;

        WRITE_ONCE(bio->bi_private, NULL);
        blk_wake_io_task(waiter);
}
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct file *file = iocb->ki_filp;
        struct block_device *bdev = I_BDEV(bdev_file_inode(file));
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;
        blk_qc_t qc;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        bio_init(&bio, vecs, nr_pages);
        bio_set_dev(&bio, bdev);
        /* block device I/O is addressed in 512-byte sectors, hence the shift */
        bio.bi_iter.bi_sector = pos >> 9;
        bio.bi_write_hint = iocb->ki_hint;
        bio.bi_private = current;
        bio.bi_end_io = blkdev_bio_end_io_simple;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio.bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter))
                        should_dirty = true;
        } else {
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);

        qc = submit_bio(&bio);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
                    !blk_poll(bdev_get_queue(bdev), qc, true))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);
        return ret;
}
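/*
 * Usage sketch (illustrative): the alignment check above means O_DIRECT
 * callers must align both the file offset and the user buffer to the
 * device's logical block size, typically via posix_memalign(); "/dev/sdX"
 * is a placeholder.
 *
 *      int fd = open("/dev/sdX", O_RDONLY | O_DIRECT);
 *      void *buf;
 *      posix_memalign(&buf, 512, 4096);        // buffer aligned to block size
 *      pread(fd, buf, 4096, 0);                // offset 0 is aligned too
 */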
struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        bool                    multi_bio : 1;
        bool                    should_dirty : 1;
        bool                    is_sync : 1;
        struct bio              bio;
};

static struct bio_set blkdev_dio_pool;
static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
{
        struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
        struct request_queue *q = bdev_get_queue(bdev);

        return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}
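/*
 * Usage sketch (illustrative): userspace opts into polled completion on an
 * O_DIRECT fd with RWF_HIPRI, which sets IOCB_HIPRI and makes the sync dio
 * paths above spin in blk_poll() instead of sleeping:
 *
 *      struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *      preadv2(fd, &iov, 1, 0, RWF_HIPRI);
 */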
static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->should_dirty;

        /* the first error wins and is reported on the parent dio */
        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
                if (!dio->is_sync) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret, 0);
                        if (dio->multi_bio)
                                bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                unsigned int nr_pages)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        blk_qc_t qc = BLK_QC_T_NONE;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);

        dio = container_of(bio, struct blkdev_dio, bio);
        dio->is_sync = is_sync = is_sync_kiocb(iocb);
        if (dio->is_sync) {
                dio->waiter = current;
                bio_get(bio);
        } else {
                dio->iocb = iocb;
        }

        dio->size = 0;
        dio->multi_bio = false;
        dio->should_dirty = is_read && iter_is_iovec(iter);

        /*
         * Don't plug for HIPRI/polled IO, as those should go straight
         * to issue
         */
        if (!is_poll)
                blk_start_plug(&plug);

        for (;;) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = pos >> 9;
                bio->bi_write_hint = iocb->ki_hint;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }

                if (is_read) {
                        bio->bi_opf = REQ_OP_READ;
                        if (dio->should_dirty)
                                bio_set_pages_dirty(bio);
                } else {
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;

                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        bool polled = false;

                        if (iocb->ki_flags & IOCB_HIPRI) {
                                bio_set_polled(bio, iocb);
                                polled = true;
                        }

                        qc = submit_bio(bio);

                        if (polled)
                                WRITE_ONCE(iocb->ki_cookie, qc);
                        break;
                }

                if (!dio->multi_bio) {
                        /*
                         * AIO needs an extra reference to ensure the dio
                         * structure which is embedded into the first bio
                         * stays around.
                         */
                        if (!is_sync)
                                bio_get(bio);
                        dio->multi_bio = true;
                        atomic_set(&dio->ref, 2);
                } else {
                        atomic_inc(&dio->ref);
                }

                submit_bio(bio);
                bio = bio_alloc(GFP_KERNEL, nr_pages);
        }

        if (!is_poll)
                blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;

                if (!(iocb->ki_flags & IOCB_HIPRI) ||
                    !blk_poll(bdev_get_queue(bdev), qc, true))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_VECS)
                return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

        return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags, struct page **pagep,
                void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 blkdev_get_block);
}
static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct page *page,
                void *fsdata)
{
        int ret;

        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}
static int blkdev_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        return generic_writepages(mapping, wbc);
}
const struct address_space_operations def_blk_aops = {
        .set_page_dirty     = __set_page_dirty_buffers,
        .readpage           = blkdev_readpage,
        .readahead          = blkdev_readahead,
        .writepage          = blkdev_writepage,
        .write_begin        = blkdev_write_begin,
        .write_end          = blkdev_write_end,
        .writepages         = blkdev_writepages,
        .direct_IO          = blkdev_direct_IO,
        .migratepage        = buffer_migrate_page_norefs,
        .is_dirty_writeback = buffer_check_dirty_writeback,
};
/*
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}
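/*
 * Usage sketch (illustrative): because the size comes from i_size_read() on
 * the bdev inode rather than the (zero-sized) file inode, seeking to the
 * end from userspace yields the device capacity:
 *
 *      off_t size = lseek(fd, 0, SEEK_END);    // bytes in the device
 */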
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct inode *bd_inode = bdev_file_inode(filp);
        struct block_device *bdev = I_BDEV(bd_inode);
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}
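/*
 * Usage sketch (illustrative): an fsync() on a block device fd both waits
 * for dirty page-cache writeback and issues a cache flush to the device;
 * hardware without a volatile write cache reports -EOPNOTSUPP, which is
 * hidden from userspace above.
 *
 *      write(fd, buf, len);
 *      fsync(fd);      // data and the device cache are on stable media
 */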
static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;

        /*
         * Preserve backwards compatibility and allow large file access
         * even if userspace doesn't ask for it explicitly. Some mkfs
         * binary needs it. We might want to drop this workaround
         * during an unstable branch.
         */
        filp->f_flags |= O_LARGEFILE;
        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

        if (filp->f_flags & O_NDELAY)
                filp->f_mode |= FMODE_NDELAY;
        if (filp->f_flags & O_EXCL)
                filp->f_mode |= FMODE_EXCL;
        if ((filp->f_flags & O_ACCMODE) == 3)
                filp->f_mode |= FMODE_WRITE_IOCTL;

        bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        filp->f_mapping = bdev->bd_inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        return 0;
}
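/*
 * Usage sketch (illustrative): the (f_flags & O_ACCMODE) == 3 case preserves
 * the historical trick of opening a device with access mode 3 to get an fd
 * that may issue ioctls without data read/write access (privilege checks on
 * the individual ioctl still apply):
 *
 *      int fd = open("/dev/sdX", 3);   // O_ACCMODE == 3, FMODE_WRITE_IOCTL
 *      ioctl(fd, BLKRRPART);           // e.g. re-read the partition table
 */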
static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = I_BDEV(bdev_file_inode(filp));

        blkdev_put(bdev, filp->f_mode);
        return 0;
}
static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
        struct block_device *bdev = I_BDEV(bdev_file_inode(file));
        fmode_t mode = file->f_mode;

        /*
         * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
         * to update it before every ioctl.
         */
        if (file->f_flags & O_NDELAY)
                mode |= FMODE_NDELAY;
        else
                mode &= ~FMODE_NDELAY;

        return blkdev_ioctl(bdev, mode, cmd, arg);
}
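/*
 * Usage sketch (illustrative): all block ioctls funnel through here; for
 * example, querying the device size in bytes:
 *
 *      unsigned long long bytes;
 *      ioctl(fd, BLKGETSIZE64, &bytes);
 */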
/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t size = i_size_read(bd_inode);
        struct blk_plug plug;
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(I_BDEV(bd_inode)))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        blk_start_plug(&plug);
        ret = __generic_file_write_iter(iocb, from);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        blk_finish_plug(&plug);
        return ret;
}
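/*
 * Usage sketch (illustrative): writes that start at or past the end of the
 * device fail with ENOSPC, while writes that cross the end are silently
 * shortened, so a userspace copy loop sees a short write rather than an
 * error; dev_size below is assumed to be the device capacity in bytes.
 *
 *      ssize_t n = pwrite(fd, buf, len, dev_size - 512);
 *      // n == 512 even if len > 512; pwrite(..., dev_size) fails instead
 */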
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t size = i_size_read(bd_inode);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret;

        if (pos >= size)
                return 0;

        size -= pos;
        if (iov_iter_count(to) > size) {
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        ret = generic_file_read_iter(iocb, to);
        iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}
#define BLKDEV_FALLOC_FL_SUPPORTED                                      \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
{
        struct block_device *bdev = I_BDEV(bdev_file_inode(file));
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = i_size_read(bdev->bd_inode);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        /* Invalidate the page cache, including dirty pages. */
        error = truncate_bdev_range(bdev, file->f_mode, start, end);
        if (error)
                return error;

        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
                                             GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
                                             GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
                                             GFP_KERNEL, 0);
                break;
        default:
                return -EOPNOTSUPP;
        }
        if (error)
                return error;

        /*
         * Invalidate the page cache again; if someone wandered in and dirtied
         * a page, we just discard it - userspace has no way of knowing whether
         * the write happened before or after discard completing...
         */
        return truncate_bdev_range(bdev, file->f_mode, start, end);
}
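/*
 * Usage sketch (illustrative): userspace reaches the branches of the switch
 * above via fallocate() on the device fd; off and len must be aligned to
 * the logical block size:
 *
 *      fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE, off, len);
 *      fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE |
 *                    FALLOC_FL_NO_HIDE_STALE, off, len);       // discard
 */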
const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = blkdev_llseek,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = blkdev_iopoll,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = blkdev_fallocate,
};
static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                                offsetof(struct blkdev_dio, bio),
                                BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);