// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
        blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb_is_dsync(iocb))
                opf |= REQ_FUA;
        return opf;
}

static bool blkdev_dio_invalid(struct block_device *bdev, loff_t pos,
                                struct iov_iter *iter, bool is_atomic)
{
        if (is_atomic && !generic_atomic_write_valid(iter, pos))
                return true;

        return pos & (bdev_logical_block_size(bdev) - 1) ||
                !bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

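/*
 * Fast path for sync direct I/O that fits in a single bio: the bio and an
 * inline bio_vec array live on the stack, so nothing is allocated from
 * blkdev_dio_pool and completion is waited for inline via submit_bio_wait().
 */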
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, struct block_device *bdev,
                unsigned int nr_pages)
{
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        if (iov_iter_rw(iter) == READ) {
                bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
                if (user_backed_iter(iter))
                        should_dirty = true;
        } else {
                bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
        }
        bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
        bio.bi_ioprio = iocb->ki_ioprio;
        if (iocb->ki_flags & IOCB_ATOMIC)
                bio.bi_opf |= REQ_ATOMIC;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == WRITE)
                task_io_account_write(ret);

        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;

        submit_bio_wait(&bio);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);

        return ret;
}

enum {
        DIO_SHOULD_DIRTY        = 1,
        DIO_IS_SYNC             = 2,
};

struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        unsigned int            flags;
        struct bio              bio ____cacheline_aligned_in_smp;
};

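/*
 * All bios of the multi-bio path below are allocated from this bio_set;
 * bioset_init() in blkdev_init() reserves front padding so that a struct
 * blkdev_dio sits immediately before the first bio and can be recovered
 * with container_of().
 */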
static struct bio_set blkdev_dio_pool;

static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (atomic_dec_and_test(&dio->ref)) {
                if (!(dio->flags & DIO_IS_SYNC)) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        WRITE_ONCE(iocb->private, NULL);

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret);
                        bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

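/*
 * Multi-bio path for requests too large for a single bio: the iterator is
 * carved into a chain of bios that share one refcounted blkdev_dio and are
 * submitted under a plug. Sync callers sleep for completion below; async
 * callers get -EIOCBQUEUED and are completed from blkdev_bio_end_io().
 */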
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                struct block_device *bdev, unsigned int nr_pages)
{
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if (iocb->ki_flags & IOCB_ALLOC_CACHE)
                opf |= REQ_ALLOC_CACHE;
        bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
                               &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        atomic_set(&dio->ref, 1);
        /*
         * Grab an extra reference to ensure the dio structure which is
         * embedded into the first bio stays around.
         */
        bio_get(bio);

        is_sync = is_sync_kiocb(iocb);
        if (is_sync) {
                dio->flags = DIO_IS_SYNC;
                dio->waiter = current;
        } else {
                dio->flags = 0;
                dio->iocb = iocb;
        }

        dio->size = 0;
        if (is_read && user_backed_iter(iter))
                dio->flags |= DIO_SHOULD_DIRTY;

        blk_start_plug(&plug);

        for (;;) {
                bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
                bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }
                if (iocb->ki_flags & IOCB_NOWAIT) {
                        /*
                         * This is nonblocking I/O, and we may need to
                         * allocate another bio if there is data left to map.
                         * As we cannot guarantee that one of the sub-bios
                         * will not fail to be issued under NOWAIT, and as
                         * errors are coalesced across all of them, be safe
                         * and ask for a retry from blocking context.
                         */
                        if (unlikely(iov_iter_count(iter))) {
                                bio_release_pages(bio, false);
                                bio_clear_flag(bio, BIO_REFFED);
                                bio_put(bio);
                                blk_finish_plug(&plug);
                                return -EAGAIN;
                        }
                        bio->bi_opf |= REQ_NOWAIT;
                }

                if (is_read) {
                        if (dio->flags & DIO_SHOULD_DIRTY)
                                bio_set_pages_dirty(bio);
                } else {
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        submit_bio(bio);
                        break;
                }
                atomic_inc(&dio->ref);
                submit_bio(bio);
                bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
        }

        blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;
                blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}

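/*
 * Completion handler for the single-bio async fast path: the iocb is
 * completed straight from bio completion context, with no reference
 * counting since exactly one bio is ever in flight.
 */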
static void blkdev_bio_end_io_async(struct bio *bio)
{
        struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
        struct kiocb *iocb = dio->iocb;
        ssize_t ret;

        WRITE_ONCE(iocb->private, NULL);

        if (likely(!bio->bi_status)) {
                ret = dio->size;
                iocb->ki_pos += ret;
        } else {
                ret = blk_status_to_errno(bio->bi_status);
        }

        iocb->ki_complete(iocb, ret);

        if (dio->flags & DIO_SHOULD_DIRTY) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

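/*
 * Single-bio async fast path: used when the whole request maps into one
 * bio, avoiding the refcounting and bio chaining of __blkdev_direct_IO().
 */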
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
                                        struct iov_iter *iter,
                                        struct block_device *bdev,
                                        unsigned int nr_pages)
{
        bool is_read = iov_iter_rw(iter) == READ;
        blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
        struct blkdev_dio *dio;
        struct bio *bio;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if (iocb->ki_flags & IOCB_ALLOC_CACHE)
                opf |= REQ_ALLOC_CACHE;
        bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
                               &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        dio->flags = 0;
        dio->iocb = iocb;
        bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
        bio->bi_end_io = blkdev_bio_end_io_async;
        bio->bi_ioprio = iocb->ki_ioprio;

        if (iov_iter_is_bvec(iter)) {
                /*
                 * Users don't rely on the iterator being in any particular
                 * state for async I/O returning -EIOCBQUEUED, hence we can
                 * avoid expensive iov_iter_advance(). Bypass
                 * bio_iov_iter_get_pages() and set the bvec directly.
                 */
                bio_iov_bvec_set(bio, iter);
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return ret;
                }
        }
        dio->size = bio->bi_iter.bi_size;

        if (is_read) {
                if (user_backed_iter(iter)) {
                        dio->flags |= DIO_SHOULD_DIRTY;
                        bio_set_pages_dirty(bio);
                }
        } else {
                task_io_account_write(bio->bi_iter.bi_size);
        }

        if (iocb->ki_flags & IOCB_ATOMIC)
                bio->bi_opf |= REQ_ATOMIC;

        if (iocb->ki_flags & IOCB_NOWAIT)
                bio->bi_opf |= REQ_NOWAIT;

        if (iocb->ki_flags & IOCB_HIPRI) {
                bio->bi_opf |= REQ_POLLED;
                submit_bio(bio);
                WRITE_ONCE(iocb->private, bio);
        } else {
                submit_bio(bio);
        }
        return -EIOCBQUEUED;
}

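/*
 * Dispatch to the cheapest implementation that can handle the request:
 * the on-stack simple path for small sync I/O, the single-bio async path
 * for small async I/O, and the refcounted multi-bio path for anything
 * larger than BIO_MAX_VECS pages.
 */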
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
        bool is_atomic = iocb->ki_flags & IOCB_ATOMIC;
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        if (blkdev_dio_invalid(bdev, iocb->ki_pos, iter, is_atomic))
                return -EINVAL;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (likely(nr_pages <= BIO_MAX_VECS)) {
                if (is_sync_kiocb(iocb))
                        return __blkdev_direct_IO_simple(iocb, iter, bdev,
                                                        nr_pages);
                return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
        } else if (is_atomic) {
                return -EINVAL;
        }
        return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

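/*
 * A block device maps 1:1 onto itself: every block-aligned in-range offset
 * is mapped, and the disk address equals the file offset. A single
 * iomap_begin callback therefore describes the whole device.
 */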
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
        struct block_device *bdev = I_BDEV(inode);
        loff_t isize = i_size_read(inode);

        iomap->bdev = bdev;
        iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
        if (iomap->offset >= isize)
                return -EIO;
        iomap->type = IOMAP_MAPPED;
        iomap->addr = iomap->offset;
        iomap->length = isize - iomap->offset;
        iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
        return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
        .iomap_begin            = blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly, which holds the buffer
 * lock. The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct blk_plug plug;
        int err;

        blk_start_plug(&plug);
        err = write_cache_pages(mapping, wbc, block_write_full_folio,
                        blkdev_get_block);
        blk_finish_plug(&plug);

        return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
        return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct folio *folio,
                void *fsdata)
{
        int ret;

        ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);

        folio_unlock(folio);
        folio_put(folio);

        return ret;
}

const struct address_space_operations def_blk_aops = {
        .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio     = blkdev_read_folio,
        .readahead      = blkdev_readahead,
        .writepages     = blkdev_writepages,
        .write_begin    = blkdev_write_begin,
        .write_end      = blkdev_write_end,
        .migrate_folio  = buffer_migrate_folio_norefs,
        .is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
        return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
                struct inode *inode, loff_t offset, unsigned int len)
{
        loff_t isize = i_size_read(inode);

        if (WARN_ON_ONCE(offset >= isize))
                return -EIO;
        if (offset >= wpc->iomap.offset &&
            offset < wpc->iomap.offset + wpc->iomap.length)
                return 0;
        return blkdev_iomap_begin(inode, offset, isize - offset,
                                  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
        .map_blocks             = blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct iomap_writepage_ctx wpc = { };

        return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
        .dirty_folio    = filemap_dirty_folio,
        .release_folio          = iomap_release_folio,
        .invalidate_folio       = iomap_invalidate_folio,
        .read_folio             = blkdev_read_folio,
        .readahead              = blkdev_readahead,
        .writepages             = blkdev_writepages,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
        .error_remove_folio     = generic_error_remove_folio,
        .migrate_folio  = filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so the
 * size is computed by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}

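/*
 * fsync on a block device: write back and wait on the page cache range,
 * then issue a device cache flush so completed writes are on stable media.
 */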
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct block_device *bdev = I_BDEV(filp->f_mapping->host);
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at file open flags and generate corresponding block open flags from
 * them. The function works both for files just being opened (e.g. during the
 * ->open callback) and for files that are already open. This is actually
 * non-trivial (see comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
        blk_mode_t mode = 0;

        if (file->f_mode & FMODE_READ)
                mode |= BLK_OPEN_READ;
        if (file->f_mode & FMODE_WRITE)
                mode |= BLK_OPEN_WRITE;
        /*
         * do_dentry_open() clears O_EXCL from f_flags, use file->private_data
         * to determine whether the open was exclusive for already open files.
         */
        if (file->private_data)
                mode |= BLK_OPEN_EXCL;
        else if (file->f_flags & O_EXCL)
                mode |= BLK_OPEN_EXCL;
        if (file->f_flags & O_NDELAY)
                mode |= BLK_OPEN_NDELAY;

        /*
         * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the
         * floppy driver has historically allowed ioctls as if the file was
         * opened for writing, but does not allow any actual reads or writes.
         */
        if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
                mode |= BLK_OPEN_WRITE_IOCTL;

        return mode;
}

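/*
 * Example: open("/dev/sda", O_RDWR | O_EXCL) yields BLK_OPEN_READ |
 * BLK_OPEN_WRITE | BLK_OPEN_EXCL while the open is in progress; once the
 * file is open, the O_EXCL bit survives only via file->private_data.
 */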
static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;
        blk_mode_t mode;
        int ret;

        mode = file_to_blk_mode(filp);
        /* Use the file as the holder. */
        if (mode & BLK_OPEN_EXCL)
                filp->private_data = filp;
        ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
        if (ret)
                return ret;

        bdev = blkdev_get_no_open(inode->i_rdev);
        if (!bdev)
                return -ENXIO;

        if (bdev_can_atomic_write(bdev) && filp->f_flags & O_DIRECT)
                filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;

        ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
        if (ret)
                blkdev_put_no_open(bdev);
        return ret;
}

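/* Pairs with blkdev_open(): drop the reference taken there. */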
static int blkdev_release(struct inode *inode, struct file *filp)
{
        bdev_release(filp);
        return 0;
}

static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        ssize_t written;

        written = kiocb_invalidate_pages(iocb, count);
        if (written) {
                if (written == -EBUSY)
                        return 0;
                return written;
        }

        written = blkdev_direct_IO(iocb, from);
        if (written > 0) {
                kiocb_invalidate_post_direct_write(iocb, count);
                iocb->ki_pos += written;
                count -= written;
        }
        if (written != -EIOCBQUEUED)
                iov_iter_revert(from, count - iov_iter_count(from));
        return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
        return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL);
}

/*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *bd_inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(bd_inode);
        loff_t size = bdev_nr_bytes(bdev);
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        ret = file_update_time(file);
        if (ret)
                return ret;

        if (iocb->ki_flags & IOCB_DIRECT) {
                ret = blkdev_direct_write(iocb, from);
                if (ret >= 0 && iov_iter_count(from))
                        ret = direct_write_fallback(iocb, from, ret,
                                        blkdev_buffered_write(iocb, from));
        } else {
                ret = blkdev_buffered_write(iocb, from);
        }

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        return ret;
}

static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
        loff_t size = bdev_nr_bytes(bdev);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret = 0;
        size_t count;

        if (unlikely(pos + iov_iter_count(to) > size)) {
                if (pos >= size)
                        return 0;
                size -= pos;
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        count = iov_iter_count(to);
        if (!count)
                goto reexpand; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                ret = kiocb_write_and_wait(iocb, count);
                if (ret < 0)
                        goto reexpand;
                file_accessed(iocb->ki_filp);

                ret = blkdev_direct_IO(iocb, to);
                if (ret >= 0) {
                        iocb->ki_pos += ret;
                        count -= ret;
                }
                iov_iter_revert(to, count - iov_iter_count(to));
                if (ret < 0 || !count)
                        goto reexpand;
        }

        ret = filemap_read(iocb, to, ret);

reexpand:
        if (unlikely(shorted))
                iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED                                      \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_ZERO_RANGE)

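/*
 * fallocate() cannot change the size of a block device, so only the
 * zeroing modes are supported: ZERO_RANGE must write zeroes to the media
 * (BLKDEV_ZERO_NOUNMAP), while PUNCH_HOLE may deallocate but must not
 * fall back to writing zeroes (BLKDEV_ZERO_NOFALLBACK).
 */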
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
{
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = bdev_nr_bytes(bdev);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        filemap_invalidate_lock(inode->i_mapping);

        /*
         * Invalidate the page cache, including dirty pages, for valid
         * de-allocate mode calls to fallocate().
         */
        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
                if (error)
                        goto fail;

                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
                if (error)
                        goto fail;

                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOFALLBACK);
                break;
        default:
                error = -EOPNOTSUPP;
        }

 fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *bd_inode = bdev_file_inode(file);

        if (bdev_read_only(I_BDEV(bd_inode)))
                return generic_file_readonly_mmap(file, vma);

        return generic_file_mmap(file, vma);
}

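/*
 * File operations for block device nodes: reads and writes go through
 * blkdev_read_iter()/blkdev_write_iter() above, with everything else wired
 * to the helpers in this file or to generic code.
 */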
const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_release,
        .llseek         = blkdev_llseek,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = iocb_bio_iopoll,
        .mmap           = blkdev_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = filemap_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = blkdev_fallocate,
        .uring_cmd      = blkdev_uring_cmd,
        .fop_flags      = FOP_BUFFER_RASYNC,
};

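/*
 * Initialise blkdev_dio_pool with front padding of
 * offsetof(struct blkdev_dio, bio) so that, for any bio allocated from the
 * set, container_of(bio, struct blkdev_dio, bio) yields its blkdev_dio.
 * BIOSET_PERCPU_CACHE backs the REQ_ALLOC_CACHE fast path used above.
 */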
static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                           offsetof(struct blkdev_dio, bio),
                           BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);