 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>
static DEFINE_MUTEX(mtd_mutex);
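
/*
 * mtd_mutex serializes the device lookup in mtdchar_open() and the ioctl
 * handlers below (see mtdchar_unlocked_ioctl() and mtdchar_compat_ioctl()).
 */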

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information of various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;

	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}
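
/*
 * Each MTD device is exposed through two character device minors: an even
 * minor for the read-write node (/dev/mtdX) and the following odd minor for
 * the read-only node (/dev/mtdXro). Hence the device number is minor >> 1,
 * and an odd minor may never be opened for writing.
 */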
static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mutex_lock(&mtd_mutex);
	mtd = get_mtd_device(NULL, devnum);

	if (mtd->type == MTD_ABSENT) {

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);

	file->private_data = mfi;
	mutex_unlock(&mtd_mutex);

	mutex_unlock(&mtd_mutex);

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE))
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */

/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is low on memory or memory is
 * highly fragmented, at the cost of reducing the performance of the
 * requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */

static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t total_retlen = 0;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	kbuf = mtd_kmalloc_up_to(mtd, &size);

	while (count) {
		len = min_t(size_t, count, size);
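
		/*
		 * The transfer is performed in chunks of at most 'size'
		 * bytes (the largest buffer mtd_kmalloc_up_to() could
		 * provide), dispatching on the file mode: factory or user
		 * OTP regions, raw access that bypasses ECC, or a plain
		 * mtd_read().
		 */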
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OPS_RAW;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}

			total_retlen += retlen;

static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t total_retlen = 0;

	pr_debug("MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	kbuf = mtd_kmalloc_up_to(mtd, &size);

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OPS_RAW;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			break;
		}
		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}
		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		total_retlen += retlen;

} /* mtdchar_write */

/*======================================================================

    IOCTL calls for getting device parameters.

 ======================================================================*/
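
/*
 * For reference, the userspace side of the simplest of these calls looks
 * roughly like the sketch below (a minimal, illustrative example using the
 * uapi definitions from <mtd/mtd-user.h>; the device path and the missing
 * error handling are placeholders):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	int fd = open("/dev/mtd0", O_RDWR);
 *	struct mtd_info_user info;
 *	struct erase_info_user ei;
 *
 *	ioctl(fd, MEMGETINFO, &info);	// size, erasesize, writesize, ...
 *	ei.start = 0;			// erase the first eraseblock
 *	ei.length = info.erasesize;
 *	ioctl(fd, MEMERASE, &ei);
 */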

static void mtdchar_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}
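
/*
 * Switch the file into one of the OTP (one-time-programmable) access modes,
 * if the underlying device supports it. Support is probed with a zero-length
 * dummy read of the corresponding protection register; if that reports
 * -EOPNOTSUPP, the mode cannot be selected.
 */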
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;
		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;
		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;

static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops;
	uint32_t retlen;
	int ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;

	if (!mtd->_write_oob)
		return -EOPNOTSUPP;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops;
	int ret = 0;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}

/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	memset(to, 0, sizeof(*to));
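
	/*
	 * The deprecated structure lists every ECC byte position
	 * individually, so each multi-byte region reported by
	 * mtd_ooblayout_ecc() below is expanded one offset at a time into
	 * eccpos[], up to MTD_MAX_ECCPOS_ENTRIES.
	 */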
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section, &oobregion);

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
		}

		section++;
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}

static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	memset(to, 0, sizeof(*to));

	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section, &oobregion);

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
		}

		section++;
	}

	for (i = 0; i < 8; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}

static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:
		/* Only the master mtd device may be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:
		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
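
/*
 * MEMWRITE: write page data and the corresponding OOB data supplied by
 * userspace in a single request (struct mtd_write_req); both buffers are
 * handed to mtd_write_oob() through one mtd_oob_ops descriptor.
 */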
static int mtdchar_write_ioctl(struct mtd_info *mtd,
		struct mtd_write_req __user *argp)
{
	struct mtd_write_req req;
	struct mtd_oob_ops ops;
	const void __user *usr_data, *usr_oob;
	int ret;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!mtd->_write_oob)
		return -EOPNOTSUPP;

	ops.len = (size_t)req.len;
	ops.ooblen = (size_t)req.ooblen;

	ops.datbuf = memdup_user(usr_data, ops.len);
	if (IS_ERR(ops.datbuf))
		return PTR_ERR(ops.datbuf);

	ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
	if (IS_ERR(ops.oobbuf)) {
		kfree(ops.datbuf);
		return PTR_ERR(ops.oobbuf);
	}

	ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);

	kfree(ops.datbuf);
	kfree(ops.oobbuf);

	return ret;
}

static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below field is obsolete */
		info.padding = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);
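
			/*
			 * The erase completes asynchronously: the driver
			 * calls erase->callback (mtdchar_erase_callback
			 * above) when it finishes, which wakes this wait
			 * queue; erase->state then tells us whether the
			 * erase succeeded or failed.
			 */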
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd_erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
		      (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		}

		if (cmd == OTPGETREGIONCOUNT) {
			int nbr = retlen / sizeof(struct otp_info);
			ret = copy_to_user(argp, &nbr, sizeof(int));
		} else
			ret = copy_to_user(argp, buf, retlen);

		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	}

	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */

static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32	_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32	_IOWR('M', 4, struct mtd_oob_buf32)
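
/*
 * 32-bit userspace passes a 32-bit pointer in mtd_oob_buf, so the structure
 * layout (and therefore the ioctl number) differs from the native
 * MEMWRITEOOB/MEMREADOOB; these compat variants are translated explicitly
 * below.
 */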

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&mtd_mutex);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * Try to determine where a shared mapping can be made.
 * - only supported for NOMMU at the moment (with an MMU, private mappings
 *   cannot be handled this way)
 */
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}

static unsigned mtdchar_mmap_capabilities(struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;

	return mtd_mmap_capabilities(mfi->mtd);
}

/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/* This is broken because it assumes the MTD device is map-based
	   and that mtd->priv is a valid struct map_info. It should be
	   replaced with something that uses the mtd_get_unmapped_area()
	   operation properly. */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner = THIS_MODULE,
	.llseek = mtdchar_lseek,
	.read = mtdchar_read,
	.write = mtdchar_write,
	.unlocked_ioctl = mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mtdchar_compat_ioctl,
#endif
	.open = mtdchar_open,
	.release = mtdchar_close,
	.mmap = mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};

int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);
	}

	return ret;
}

void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);