1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/drivers/char/mem.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  Added devfs support.
8  *    Jan-11-1998, C. Scott Ananian <[email protected]>
9  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <[email protected]>
10  */
11
12 #include <linux/mm.h>
13 #include <linux/miscdevice.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
16 #include <linux/mman.h>
17 #include <linux/random.h>
18 #include <linux/init.h>
19 #include <linux/raw.h>
20 #include <linux/tty.h>
21 #include <linux/capability.h>
22 #include <linux/ptrace.h>
23 #include <linux/device.h>
24 #include <linux/highmem.h>
25 #include <linux/backing-dev.h>
26 #include <linux/shmem_fs.h>
27 #include <linux/splice.h>
28 #include <linux/pfn.h>
29 #include <linux/export.h>
30 #include <linux/io.h>
31 #include <linux/uio.h>
32 #include <linux/uaccess.h>
33 #include <linux/security.h>
34 #include <linux/pseudo_fs.h>
35 #include <uapi/linux/magic.h>
36 #include <linux/mount.h>
37
38 #ifdef CONFIG_IA64
39 # include <linux/efi.h>
40 #endif
41
42 #define DEVMEM_MINOR    1
43 #define DEVPORT_MINOR   4
44
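/*
 * Bytes remaining in the page that contains @start, capped at @size;
 * used to split transfers on page boundaries.
 */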
45 static inline unsigned long size_inside_page(unsigned long start,
46                                              unsigned long size)
47 {
48         unsigned long sz;
49
50         sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
51
52         return min(sz, size);
53 }
54
55 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
56 static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
57 {
58         return addr + count <= __pa(high_memory);
59 }
60
61 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
62 {
63         return 1;
64 }
65 #endif
66
67 #ifdef CONFIG_STRICT_DEVMEM
68 static inline int page_is_allowed(unsigned long pfn)
69 {
70         return devmem_is_allowed(pfn);
71 }
72 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
73 {
74         u64 from = ((u64)pfn) << PAGE_SHIFT;
75         u64 to = from + size;
76         u64 cursor = from;
77
78         while (cursor < to) {
79                 if (!devmem_is_allowed(pfn))
80                         return 0;
81                 cursor += PAGE_SIZE;
82                 pfn++;
83         }
84         return 1;
85 }
86 #else
87 static inline int page_is_allowed(unsigned long pfn)
88 {
89         return 1;
90 }
91 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
92 {
93         return 1;
94 }
95 #endif
96
97 #ifndef unxlate_dev_mem_ptr
98 #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
99 void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
100 {
101 }
102 #endif
103
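/*
 * Reschedule if needed and tell the caller to abandon the copy loop
 * when a fatal signal is pending.
 */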
104 static inline bool should_stop_iteration(void)
105 {
106         if (need_resched())
107                 cond_resched();
108         return fatal_signal_pending(current);
109 }
110
111 /*
112  * This function reads the *physical* memory. The f_pos points directly to the
113  * memory location.
114  */
115 static ssize_t read_mem(struct file *file, char __user *buf,
116                         size_t count, loff_t *ppos)
117 {
118         phys_addr_t p = *ppos;
119         ssize_t read, sz;
120         void *ptr;
121         char *bounce;
122         int err;
123
124         if (p != *ppos)
125                 return 0;
126
127         if (!valid_phys_addr_range(p, count))
128                 return -EFAULT;
129         read = 0;
130 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
131         /* we don't have page 0 mapped on sparc and m68k.. */
132         if (p < PAGE_SIZE) {
133                 sz = size_inside_page(p, count);
134                 if (sz > 0) {
135                         if (clear_user(buf, sz))
136                                 return -EFAULT;
137                         buf += sz;
138                         p += sz;
139                         count -= sz;
140                         read += sz;
141                 }
142         }
143 #endif
144
145         bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
146         if (!bounce)
147                 return -ENOMEM;
148
149         while (count > 0) {
150                 unsigned long remaining;
151                 int allowed, probe;
152
153                 sz = size_inside_page(p, count);
154
155                 err = -EPERM;
156                 allowed = page_is_allowed(p >> PAGE_SHIFT);
157                 if (!allowed)
158                         goto failed;
159
160                 err = -EFAULT;
161                 if (allowed == 2) {
162                         /* Show zeros for restricted memory. */
163                         remaining = clear_user(buf, sz);
164                 } else {
165                         /*
166                          * On ia64 if a page has been mapped somewhere as
167                          * uncached, then it must also be accessed uncached
168                          * by the kernel or data corruption may occur.
169                          */
170                         ptr = xlate_dev_mem_ptr(p);
171                         if (!ptr)
172                                 goto failed;
173
174                         probe = copy_from_kernel_nofault(bounce, ptr, sz);
175                         unxlate_dev_mem_ptr(p, ptr);
176                         if (probe)
177                                 goto failed;
178
179                         remaining = copy_to_user(buf, bounce, sz);
180                 }
181
182                 if (remaining)
183                         goto failed;
184
185                 buf += sz;
186                 p += sz;
187                 count -= sz;
188                 read += sz;
189                 if (should_stop_iteration())
190                         break;
191         }
192         kfree(bounce);
193
194         *ppos += read;
195         return read;
196
197 failed:
198         kfree(bounce);
199         return err;
200 }
201
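/* Counterpart to read_mem(): write user data to *physical* memory. */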
202 static ssize_t write_mem(struct file *file, const char __user *buf,
203                          size_t count, loff_t *ppos)
204 {
205         phys_addr_t p = *ppos;
206         ssize_t written, sz;
207         unsigned long copied;
208         void *ptr;
209
210         if (p != *ppos)
211                 return -EFBIG;
212
213         if (!valid_phys_addr_range(p, count))
214                 return -EFAULT;
215
216         written = 0;
217
218 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
219         /* we don't have page 0 mapped on sparc and m68k.. */
220         if (p < PAGE_SIZE) {
221                 sz = size_inside_page(p, count);
222                 /* Hmm. Do something? */
223                 buf += sz;
224                 p += sz;
225                 count -= sz;
226                 written += sz;
227         }
228 #endif
229
230         while (count > 0) {
231                 int allowed;
232
233                 sz = size_inside_page(p, count);
234
235                 allowed = page_is_allowed(p >> PAGE_SHIFT);
236                 if (!allowed)
237                         return -EPERM;
238
239                 /* Skip actual writing when a page is marked as restricted. */
240                 if (allowed == 1) {
241                         /*
242                          * On ia64 if a page has been mapped somewhere as
243                          * uncached, then it must also be accessed uncached
244                          * by the kernel or data corruption may occur.
245                          */
246                         ptr = xlate_dev_mem_ptr(p);
247                         if (!ptr) {
248                                 if (written)
249                                         break;
250                                 return -EFAULT;
251                         }
252
253                         copied = copy_from_user(ptr, buf, sz);
254                         unxlate_dev_mem_ptr(p, ptr);
255                         if (copied) {
256                                 written += sz - copied;
257                                 if (written)
258                                         break;
259                                 return -EFAULT;
260                         }
261                 }
262
263                 buf += sz;
264                 p += sz;
265                 count -= sz;
266                 written += sz;
267                 if (should_stop_iteration())
268                         break;
269         }
270
271         *ppos += written;
272         return written;
273 }
274
275 int __weak phys_mem_access_prot_allowed(struct file *file,
276         unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
277 {
278         return 1;
279 }
280
281 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
282
283 /*
284  * Architectures vary in how they handle caching for addresses
285  * outside of main memory.
286  *
287  */
288 #ifdef pgprot_noncached
289 static int uncached_access(struct file *file, phys_addr_t addr)
290 {
291 #if defined(CONFIG_IA64)
292         /*
293          * On ia64, we ignore O_DSYNC because we cannot tolerate memory
294          * attribute aliases.
295          */
296         return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
297 #elif defined(CONFIG_MIPS)
298         {
299                 extern int __uncached_access(struct file *file,
300                                              unsigned long addr);
301
302                 return __uncached_access(file, addr);
303         }
304 #else
305         /*
306          * Accessing memory above the top of what the kernel knows about, or
307          * through a file pointer that was marked O_DSYNC, will be done
308          * non-cached.
309          */
310         if (file->f_flags & O_DSYNC)
311                 return 1;
312         return addr >= __pa(high_memory);
313 #endif
314 }
315 #endif
316
317 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
318                                      unsigned long size, pgprot_t vma_prot)
319 {
320 #ifdef pgprot_noncached
321         phys_addr_t offset = pfn << PAGE_SHIFT;
322
323         if (uncached_access(file, offset))
324                 return pgprot_noncached(vma_prot);
325 #endif
326         return vma_prot;
327 }
328 #endif
329
330 #ifndef CONFIG_MMU
331 static unsigned long get_unmapped_area_mem(struct file *file,
332                                            unsigned long addr,
333                                            unsigned long len,
334                                            unsigned long pgoff,
335                                            unsigned long flags)
336 {
337         if (!valid_mmap_phys_addr_range(pgoff, len))
338                 return (unsigned long) -EINVAL;
339         return pgoff << PAGE_SHIFT;
340 }
341
342 /* permit direct mmap, for read, write or exec */
343 static unsigned memory_mmap_capabilities(struct file *file)
344 {
345         return NOMMU_MAP_DIRECT |
346                 NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
347 }
348
349 static unsigned zero_mmap_capabilities(struct file *file)
350 {
351         return NOMMU_MAP_COPY;
352 }
353
354 /* can't do an in-place private mapping if there's no MMU */
355 static inline int private_mapping_ok(struct vm_area_struct *vma)
356 {
357         return vma->vm_flags & VM_MAYSHARE;
358 }
359 #else
360
361 static inline int private_mapping_ok(struct vm_area_struct *vma)
362 {
363         return 1;
364 }
365 #endif
366
367 static const struct vm_operations_struct mmap_mem_ops = {
368 #ifdef CONFIG_HAVE_IOREMAP_PROT
369         .access = generic_access_phys
370 #endif
371 };
372
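/*
 * mmap() of /dev/mem: map the requested physical range into the caller's
 * address space, subject to the usual range and permission checks.
 */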
373 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
374 {
375         size_t size = vma->vm_end - vma->vm_start;
376         phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
377
378         /* Does it even fit in phys_addr_t? */
379         if (offset >> PAGE_SHIFT != vma->vm_pgoff)
380                 return -EINVAL;
381
382         /* It's illegal to wrap around the end of the physical address space. */
383         if (offset + (phys_addr_t)size - 1 < offset)
384                 return -EINVAL;
385
386         if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
387                 return -EINVAL;
388
389         if (!private_mapping_ok(vma))
390                 return -ENOSYS;
391
392         if (!range_is_allowed(vma->vm_pgoff, size))
393                 return -EPERM;
394
395         if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
396                                                 &vma->vm_page_prot))
397                 return -EINVAL;
398
399         vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
400                                                  size,
401                                                  vma->vm_page_prot);
402
403         vma->vm_ops = &mmap_mem_ops;
404
405         /* Remap-pfn-range will mark the range VM_IO */
406         if (remap_pfn_range(vma,
407                             vma->vm_start,
408                             vma->vm_pgoff,
409                             size,
410                             vma->vm_page_prot)) {
411                 return -EAGAIN;
412         }
413         return 0;
414 }
415
416 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
417 {
418         unsigned long pfn;
419
420         /* Turn a kernel-virtual address into a physical page frame */
421         pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
422
423         /*
424          * RED-PEN: on some architectures there is more mapped memory than
425          * available in mem_map, which pfn_valid checks for. Perhaps a new
426          * macro should be added here.
427          *
428          * RED-PEN: vmalloc is not supported right now.
429          */
430         if (!pfn_valid(pfn))
431                 return -EIO;
432
433         vma->vm_pgoff = pfn;
434         return mmap_mem(file, vma);
435 }
436
437 /*
438  * This function reads the *virtual* memory as seen by the kernel.
439  */
440 static ssize_t read_kmem(struct file *file, char __user *buf,
441                          size_t count, loff_t *ppos)
442 {
443         unsigned long p = *ppos;
444         ssize_t low_count, read, sz;
445         char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
446         int err = 0;
447
448         read = 0;
449         if (p < (unsigned long) high_memory) {
450                 low_count = count;
451                 if (count > (unsigned long)high_memory - p)
452                         low_count = (unsigned long)high_memory - p;
453
454 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
455                 /* we don't have page 0 mapped on sparc and m68k.. */
456                 if (p < PAGE_SIZE && low_count > 0) {
457                         sz = size_inside_page(p, low_count);
458                         if (clear_user(buf, sz))
459                                 return -EFAULT;
460                         buf += sz;
461                         p += sz;
462                         read += sz;
463                         low_count -= sz;
464                         count -= sz;
465                 }
466 #endif
467                 while (low_count > 0) {
468                         sz = size_inside_page(p, low_count);
469
470                         /*
471                          * On ia64 if a page has been mapped somewhere as
472                          * uncached, then it must also be accessed uncached
473                          * by the kernel or data corruption may occur
474                          */
475                         kbuf = xlate_dev_kmem_ptr((void *)p);
476                         if (!virt_addr_valid(kbuf))
477                                 return -ENXIO;
478
479                         if (copy_to_user(buf, kbuf, sz))
480                                 return -EFAULT;
481                         buf += sz;
482                         p += sz;
483                         read += sz;
484                         low_count -= sz;
485                         count -= sz;
486                         if (should_stop_iteration()) {
487                                 count = 0;
488                                 break;
489                         }
490                 }
491         }
492
493         if (count > 0) {
494                 kbuf = (char *)__get_free_page(GFP_KERNEL);
495                 if (!kbuf)
496                         return -ENOMEM;
497                 while (count > 0) {
498                         sz = size_inside_page(p, count);
499                         if (!is_vmalloc_or_module_addr((void *)p)) {
500                                 err = -ENXIO;
501                                 break;
502                         }
503                         sz = vread(kbuf, (char *)p, sz);
504                         if (!sz)
505                                 break;
506                         if (copy_to_user(buf, kbuf, sz)) {
507                                 err = -EFAULT;
508                                 break;
509                         }
510                         count -= sz;
511                         buf += sz;
512                         read += sz;
513                         p += sz;
514                         if (should_stop_iteration())
515                                 break;
516                 }
517                 free_page((unsigned long)kbuf);
518         }
519         *ppos = p;
520         return read ? read : err;
521 }
522
523
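/*
 * Helper for write_kmem(): copy user data, page by page, into the
 * directly-mapped kernel range below high_memory.
 */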
524 static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
525                                 size_t count, loff_t *ppos)
526 {
527         ssize_t written, sz;
528         unsigned long copied;
529
530         written = 0;
531 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
532         /* we don't have page 0 mapped on sparc and m68k.. */
533         if (p < PAGE_SIZE) {
534                 sz = size_inside_page(p, count);
535                 /* Hmm. Do something? */
536                 buf += sz;
537                 p += sz;
538                 count -= sz;
539                 written += sz;
540         }
541 #endif
542
543         while (count > 0) {
544                 void *ptr;
545
546                 sz = size_inside_page(p, count);
547
548                 /*
549                  * On ia64 if a page has been mapped somewhere as uncached, then
550                  * it must also be accessed uncached by the kernel or data
551                  * corruption may occur.
552                  */
553                 ptr = xlate_dev_kmem_ptr((void *)p);
554                 if (!virt_addr_valid(ptr))
555                         return -ENXIO;
556
557                 copied = copy_from_user(ptr, buf, sz);
558                 if (copied) {
559                         written += sz - copied;
560                         if (written)
561                                 break;
562                         return -EFAULT;
563                 }
564                 buf += sz;
565                 p += sz;
566                 count -= sz;
567                 written += sz;
568                 if (should_stop_iteration())
569                         break;
570         }
571
572         *ppos += written;
573         return written;
574 }
575
576 /*
577  * This function writes to the *virtual* memory as seen by the kernel.
578  */
579 static ssize_t write_kmem(struct file *file, const char __user *buf,
580                           size_t count, loff_t *ppos)
581 {
582         unsigned long p = *ppos;
583         ssize_t wrote = 0;
584         ssize_t virtr = 0;
585         char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
586         int err = 0;
587
588         if (p < (unsigned long) high_memory) {
589                 unsigned long to_write = min_t(unsigned long, count,
590                                                (unsigned long)high_memory - p);
591                 wrote = do_write_kmem(p, buf, to_write, ppos);
592                 if (wrote != to_write)
593                         return wrote;
594                 p += wrote;
595                 buf += wrote;
596                 count -= wrote;
597         }
598
599         if (count > 0) {
600                 kbuf = (char *)__get_free_page(GFP_KERNEL);
601                 if (!kbuf)
602                         return wrote ? wrote : -ENOMEM;
603                 while (count > 0) {
604                         unsigned long sz = size_inside_page(p, count);
605                         unsigned long n;
606
607                         if (!is_vmalloc_or_module_addr((void *)p)) {
608                                 err = -ENXIO;
609                                 break;
610                         }
611                         n = copy_from_user(kbuf, buf, sz);
612                         if (n) {
613                                 err = -EFAULT;
614                                 break;
615                         }
616                         vwrite(kbuf, (char *)p, sz);
617                         count -= sz;
618                         buf += sz;
619                         virtr += sz;
620                         p += sz;
621                         if (should_stop_iteration())
622                                 break;
623                 }
624                 free_page((unsigned long)kbuf);
625         }
626
627         *ppos = p;
628         return virtr + wrote ? : err;
629 }
630
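/*
 * /dev/port: read I/O port space one byte at a time; the file offset
 * selects the port number.
 */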
631 static ssize_t read_port(struct file *file, char __user *buf,
632                          size_t count, loff_t *ppos)
633 {
634         unsigned long i = *ppos;
635         char __user *tmp = buf;
636
637         if (!access_ok(buf, count))
638                 return -EFAULT;
639         while (count-- > 0 && i < 65536) {
640                 if (__put_user(inb(i), tmp) < 0)
641                         return -EFAULT;
642                 i++;
643                 tmp++;
644         }
645         *ppos = i;
646         return tmp-buf;
647 }
648
649 static ssize_t write_port(struct file *file, const char __user *buf,
650                           size_t count, loff_t *ppos)
651 {
652         unsigned long i = *ppos;
653         const char __user *tmp = buf;
654
655         if (!access_ok(buf, count))
656                 return -EFAULT;
657         while (count-- > 0 && i < 65536) {
658                 char c;
659
660                 if (__get_user(c, tmp)) {
661                         if (tmp > buf)
662                                 break;
663                         return -EFAULT;
664                 }
665                 outb(c, i);
666                 i++;
667                 tmp++;
668         }
669         *ppos = i;
670         return tmp-buf;
671 }
672
673 static ssize_t read_null(struct file *file, char __user *buf,
674                          size_t count, loff_t *ppos)
675 {
676         return 0;
677 }
678
679 static ssize_t write_null(struct file *file, const char __user *buf,
680                           size_t count, loff_t *ppos)
681 {
682         return count;
683 }
684
685 static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
686 {
687         return 0;
688 }
689
690 static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
691 {
692         size_t count = iov_iter_count(from);
693         iov_iter_advance(from, count);
694         return count;
695 }
696
697 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
698                         struct splice_desc *sd)
699 {
700         return sd->len;
701 }
702
703 static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
704                                  loff_t *ppos, size_t len, unsigned int flags)
705 {
706         return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
707 }
708
709 static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
710 {
711         size_t written = 0;
712
713         while (iov_iter_count(iter)) {
714                 size_t chunk = iov_iter_count(iter), n;
715
716                 if (chunk > PAGE_SIZE)
717                         chunk = PAGE_SIZE;      /* Just for latency reasons */
718                 n = iov_iter_zero(chunk, iter);
719                 if (!n && iov_iter_count(iter))
720                         return written ? written : -EFAULT;
721                 written += n;
722                 if (signal_pending(current))
723                         return written ? written : -ERESTARTSYS;
724                 cond_resched();
725         }
726         return written;
727 }
728
729 static int mmap_zero(struct file *file, struct vm_area_struct *vma)
730 {
731 #ifndef CONFIG_MMU
732         return -ENOSYS;
733 #endif
734         if (vma->vm_flags & VM_SHARED)
735                 return shmem_zero_setup(vma);
736         vma_set_anonymous(vma);
737         return 0;
738 }
739
740 static unsigned long get_unmapped_area_zero(struct file *file,
741                                 unsigned long addr, unsigned long len,
742                                 unsigned long pgoff, unsigned long flags)
743 {
744 #ifdef CONFIG_MMU
745         if (flags & MAP_SHARED) {
746                 /*
747                  * mmap_zero() will call shmem_zero_setup() to create a file,
748                  * so use shmem's get_unmapped_area in case it can be huge;
749                  * and pass NULL for file as in mmap.c's get_unmapped_area(),
750                  * so as not to confuse shmem with our handle on "/dev/zero".
751                  */
752                 return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
753         }
754
755         /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
756         return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
757 #else
758         return -ENOSYS;
759 #endif
760 }
761
762 static ssize_t write_full(struct file *file, const char __user *buf,
763                           size_t count, loff_t *ppos)
764 {
765         return -ENOSPC;
766 }
767
768 /*
769  * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
770  * can fopen() both devices with "a" now.  This was previously impossible.
771  * -- SRB.
772  */
773 static loff_t null_lseek(struct file *file, loff_t offset, int orig)
774 {
775         return file->f_pos = 0;
776 }
777
778 /*
779  * The memory devices use the full 32/64 bits of the offset, and so we cannot
780  * check against negative addresses: they are ok. The return value is weird,
781  * though, in that case (0).
782  *
783  * Also note that seeking relative to the "end of file" isn't supported:
784  * it has no meaning, so it returns -EINVAL.
785  */
786 static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
787 {
788         loff_t ret;
789
790         inode_lock(file_inode(file));
791         switch (orig) {
792         case SEEK_CUR:
793                 offset += file->f_pos;
794                 /* fall through */
795         case SEEK_SET:
796                 /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
797                 if ((unsigned long long)offset >= -MAX_ERRNO) {
798                         ret = -EOVERFLOW;
799                         break;
800                 }
801                 file->f_pos = offset;
802                 ret = file->f_pos;
803                 force_successful_syscall_return();
804                 break;
805         default:
806                 ret = -EINVAL;
807         }
808         inode_unlock(file_inode(file));
809         return ret;
810 }
811
812 static struct inode *devmem_inode;
813
814 #ifdef CONFIG_IO_STRICT_DEVMEM
815 void revoke_devmem(struct resource *res)
816 {
817         struct inode *inode = READ_ONCE(devmem_inode);
818
819         /*
820          * Check that the initialization has completed. Losing the race
821          * is ok because it means drivers are claiming resources before
822          * the fs_initcall level of init, which prevents /dev/mem from
823          * establishing mappings.
824          */
825         if (!inode)
826                 return;
827
828         /*
829          * The expectation is that the driver has successfully marked
830          * the resource busy by this point, so devmem_is_allowed()
831          * should start returning false; however, for performance this
832          * does not iterate the entire resource range.
833          */
834         if (devmem_is_allowed(PHYS_PFN(res->start)) &&
835             devmem_is_allowed(PHYS_PFN(res->end))) {
836                 /*
837                  * *cringe* iomem=relaxed says "go ahead, what's the
838                  * worst that can happen?"
839                  */
840                 return;
841         }
842
843         unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
844 }
845 #endif
846
847 static int open_port(struct inode *inode, struct file *filp)
848 {
849         int rc;
850
851         if (!capable(CAP_SYS_RAWIO))
852                 return -EPERM;
853
854         rc = security_locked_down(LOCKDOWN_DEV_MEM);
855         if (rc)
856                 return rc;
857
858         if (iminor(inode) != DEVMEM_MINOR)
859                 return 0;
860
861         /*
862          * Use a unified address space to have a single point to manage
863          * revocations when drivers want to take over a /dev/mem mapped
864          * range.
865          */
866         inode->i_mapping = devmem_inode->i_mapping;
867         filp->f_mapping = inode->i_mapping;
868
869         return 0;
870 }
871
872 #define zero_lseek      null_lseek
873 #define full_lseek      null_lseek
874 #define write_zero      write_null
875 #define write_iter_zero write_iter_null
876 #define open_mem        open_port
877 #define open_kmem       open_mem
878
879 static const struct file_operations __maybe_unused mem_fops = {
880         .llseek         = memory_lseek,
881         .read           = read_mem,
882         .write          = write_mem,
883         .mmap           = mmap_mem,
884         .open           = open_mem,
885 #ifndef CONFIG_MMU
886         .get_unmapped_area = get_unmapped_area_mem,
887         .mmap_capabilities = memory_mmap_capabilities,
888 #endif
889 };
890
891 static const struct file_operations __maybe_unused kmem_fops = {
892         .llseek         = memory_lseek,
893         .read           = read_kmem,
894         .write          = write_kmem,
895         .mmap           = mmap_kmem,
896         .open           = open_kmem,
897 #ifndef CONFIG_MMU
898         .get_unmapped_area = get_unmapped_area_mem,
899         .mmap_capabilities = memory_mmap_capabilities,
900 #endif
901 };
902
903 static const struct file_operations null_fops = {
904         .llseek         = null_lseek,
905         .read           = read_null,
906         .write          = write_null,
907         .read_iter      = read_iter_null,
908         .write_iter     = write_iter_null,
909         .splice_write   = splice_write_null,
910 };
911
912 static const struct file_operations __maybe_unused port_fops = {
913         .llseek         = memory_lseek,
914         .read           = read_port,
915         .write          = write_port,
916         .open           = open_port,
917 };
918
919 static const struct file_operations zero_fops = {
920         .llseek         = zero_lseek,
921         .write          = write_zero,
922         .read_iter      = read_iter_zero,
923         .write_iter     = write_iter_zero,
924         .mmap           = mmap_zero,
925         .get_unmapped_area = get_unmapped_area_zero,
926 #ifndef CONFIG_MMU
927         .mmap_capabilities = zero_mmap_capabilities,
928 #endif
929 };
930
931 static const struct file_operations full_fops = {
932         .llseek         = full_lseek,
933         .read_iter      = read_iter_zero,
934         .write          = write_full,
935 };
936
937 static const struct memdev {
938         const char *name;
939         umode_t mode;
940         const struct file_operations *fops;
941         fmode_t fmode;
942 } devlist[] = {
943 #ifdef CONFIG_DEVMEM
944          [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
945 #endif
946 #ifdef CONFIG_DEVKMEM
947          [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
948 #endif
949          [3] = { "null", 0666, &null_fops, 0 },
950 #ifdef CONFIG_DEVPORT
951          [4] = { "port", 0, &port_fops, 0 },
952 #endif
953          [5] = { "zero", 0666, &zero_fops, 0 },
954          [7] = { "full", 0666, &full_fops, 0 },
955          [8] = { "random", 0666, &random_fops, 0 },
956          [9] = { "urandom", 0666, &urandom_fops, 0 },
957 #ifdef CONFIG_PRINTK
958         [11] = { "kmsg", 0644, &kmsg_fops, 0 },
959 #endif
960 };
961
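/*
 * open() for the mem character major: look up the minor in devlist and
 * switch to that device's file_operations before calling its open().
 */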
962 static int memory_open(struct inode *inode, struct file *filp)
963 {
964         int minor;
965         const struct memdev *dev;
966
967         minor = iminor(inode);
968         if (minor >= ARRAY_SIZE(devlist))
969                 return -ENXIO;
970
971         dev = &devlist[minor];
972         if (!dev->fops)
973                 return -ENXIO;
974
975         filp->f_op = dev->fops;
976         filp->f_mode |= dev->fmode;
977
978         if (dev->fops->open)
979                 return dev->fops->open(inode, filp);
980
981         return 0;
982 }
983
984 static const struct file_operations memory_fops = {
985         .open = memory_open,
986         .llseek = noop_llseek,
987 };
988
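/* Report the default device node mode from devlist, if one is set. */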
989 static char *mem_devnode(struct device *dev, umode_t *mode)
990 {
991         if (mode && devlist[MINOR(dev->devt)].mode)
992                 *mode = devlist[MINOR(dev->devt)].mode;
993         return NULL;
994 }
995
996 static struct class *mem_class;
997
998 static int devmem_fs_init_fs_context(struct fs_context *fc)
999 {
1000         return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
1001 }
1002
1003 static struct file_system_type devmem_fs_type = {
1004         .name           = "devmem",
1005         .owner          = THIS_MODULE,
1006         .init_fs_context = devmem_fs_init_fs_context,
1007         .kill_sb        = kill_anon_super,
1008 };
1009
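/*
 * Allocate the single anon inode whose address_space backs every /dev/mem
 * mapping, giving revoke_devmem() one place to unmap from.
 */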
1010 static int devmem_init_inode(void)
1011 {
1012         static struct vfsmount *devmem_vfs_mount;
1013         static int devmem_fs_cnt;
1014         struct inode *inode;
1015         int rc;
1016
1017         rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
1018         if (rc < 0) {
1019                 pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
1020                 return rc;
1021         }
1022
1023         inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
1024         if (IS_ERR(inode)) {
1025                 rc = PTR_ERR(inode);
1026                 pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
1027                 simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
1028                 return rc;
1029         }
1030
1031         /* publish /dev/mem initialized */
1032         WRITE_ONCE(devmem_inode, inode);
1033
1034         return 0;
1035 }
1036
1037 static int __init chr_dev_init(void)
1038 {
1039         int minor;
1040
1041         if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
1042                 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
1043
1044         mem_class = class_create(THIS_MODULE, "mem");
1045         if (IS_ERR(mem_class))
1046                 return PTR_ERR(mem_class);
1047
1048         mem_class->devnode = mem_devnode;
1049         for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
1050                 if (!devlist[minor].name)
1051                         continue;
1052
1053                 /*
1054                  * Create /dev/port?
1055                  */
1056                 if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
1057                         continue;
1058                 if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
1059                         continue;
1060
1061                 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
1062                               NULL, devlist[minor].name);
1063         }
1064
1065         return tty_init();
1066 }
1067
1068 fs_initcall(chr_dev_init);