mm: add optional close() to struct vm_special_mapping
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/admin-guide/mm/nommu-mmap.rst
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
        struct page *page;

        /*
         * If the object we have should not have ksize performed on it,
         * return size of 0
         */
        if (!objp || !virt_addr_valid(objp))
                return 0;

        page = virt_to_head_page(objp);

        /*
         * If the allocator sets PageSlab, we know the pointer came from
         * kmalloc().
         */
        if (PageSlab(page))
                return ksize(objp);

        /*
         * If it's not a compound page, see if we have a matching VMA
         * region. This test is intentionally done in reverse order,
         * so if there's no VMA, we still fall through and hand back
         * PAGE_SIZE for 0-order pages.
         */
        if (!PageCompound(page)) {
                struct vm_area_struct *vma;

                vma = find_vma(current->mm, (unsigned long)objp);
                if (vma)
                        return vma->vm_end - vma->vm_start;
        }

        /*
         * The ksize() function is only guaranteed to work for pointers
         * returned by kmalloc(). So handle arbitrary pointers here.
         */
        return page_size(page);
}

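/*
 * under !MMU, "vmalloc" memory is backed directly by kmalloc() (see
 * __vmalloc_noprof() below), so freeing it is simply a kfree() of the
 * underlying allocation
 */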
void vfree(const void *addr)
{
        kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
{
        /*
         *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
         * returns only a logical address.
         */
        return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc_noprof);

void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
        return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
}

void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
                unsigned long start, unsigned long end, gfp_t gfp_mask,
                pgprot_t prot, unsigned long vm_flags, int node,
                const void *caller)
{
        return __vmalloc_noprof(size, gfp_mask);
}

void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
                int node, const void *caller)
{
        return __vmalloc_noprof(size, gfp_mask);
}

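/*
 * allocate user-mappable memory and mark the covering VMA with VM_USERMAP so
 * that a later remap_vmalloc_range() on the buffer is permitted
 */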
static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
{
        void *ret;

        ret = __vmalloc(size, flags);
        if (ret) {
                struct vm_area_struct *vma;

                mmap_write_lock(current->mm);
                vma = find_vma(current->mm, (unsigned long)ret);
                if (vma)
                        vm_flags_set(vma, VM_USERMAP);
                mmap_write_unlock(current->mm);
        }

        return ret;
}

void *vmalloc_user_noprof(unsigned long size)
{
        return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vmalloc_user_noprof);

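/*
 * with no MMU there is no separate vmalloc address space: a vmalloc'd pointer
 * is an ordinary kernel pointer, so the backing page can be looked up directly
 */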
struct page *vmalloc_to_page(const void *addr)
{
        return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
        return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

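/*
 * read from a "vmalloc" area into an iov_iter; as vmalloc memory is ordinary
 * kernel memory here, this reduces to an overflow-checked copy_to_iter()
 */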
long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
{
        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        return copy_to_iter(addr, count, iter);
}

/*
 *      vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vmalloc_noprof(unsigned long size)
{
        return __vmalloc_noprof(size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_noprof);

void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof);

/*
 *      vzalloc - allocate virtually contiguous memory with zero fill
 *
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *      The memory allocated is set to zero.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vzalloc_noprof(unsigned long size)
{
        return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_noprof);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:       allocation size
 * @node:       numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node_noprof(unsigned long size, int node)
{
        return vmalloc_noprof(size);
}
EXPORT_SYMBOL(vmalloc_node_noprof);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:       allocation size
 * @node:       numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node_noprof(unsigned long size, int node)
{
        return vzalloc_noprof(size);
}
EXPORT_SYMBOL(vzalloc_node_noprof);

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *      @size:          allocation size
 *
 *      Allocate enough 32bit PA addressable pages to cover @size from the
 *      page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32_noprof(unsigned long size)
{
        return __vmalloc_noprof(size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32_noprof);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *      @size:          allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user_noprof(unsigned long size)
{
        /*
         * We'll have to sort out the ZONE_DMA bits for 64-bit,
         * but for now this can simply use vmalloc_user() directly.
         */
        return vmalloc_user_noprof(size);
}
EXPORT_SYMBOL(vmalloc_32_user_noprof);

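/*
 * mapping arbitrary page arrays into a contiguous kernel range requires page
 * table manipulation, which does not exist without an MMU, so the vmap family
 * below are unsupportable stubs
 */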
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
        BUG();
        return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
        BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node)
{
        BUG();
        return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
        BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

void free_vm_area(struct vm_struct *area)
{
        BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                   struct page *page)
{
        return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
                        struct page **pages, unsigned long *num)
{
        return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_pages);

int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
                        unsigned long num)
{
        return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages);

int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
                                unsigned long num)
{
        return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages_zero);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
        struct mm_struct *mm = current->mm;

        if (brk < mm->start_brk || brk > mm->context.end_brk)
                return mm->brk;

        if (mm->brk == brk)
                return mm->brk;

        /*
         * Always allow shrinking brk
         */
        if (brk <= mm->brk) {
                mm->brk = brk;
                return brk;
        }

        /*
         * Ok, looks good - let it rip.
         */
        flush_icache_user_range(mm->brk, brk);
        return mm->brk = brk;
}

/*
 * initialise the percpu counter for VM and region record slabs
 */
void __init mmap_init(void)
{
        int ret;

        ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
        VM_BUG_ON(ret);
        vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
        struct vm_region *region, *last;
        struct rb_node *p, *lastp;

        lastp = rb_first(&nommu_region_tree);
        if (!lastp)
                return;

        last = rb_entry(lastp, struct vm_region, vm_rb);
        BUG_ON(last->vm_end <= last->vm_start);
        BUG_ON(last->vm_top < last->vm_end);

        while ((p = rb_next(lastp))) {
                region = rb_entry(p, struct vm_region, vm_rb);
                last = rb_entry(lastp, struct vm_region, vm_rb);

                BUG_ON(region->vm_end <= region->vm_start);
                BUG_ON(region->vm_top < region->vm_end);
                BUG_ON(region->vm_start < last->vm_top);

                lastp = p;
        }
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
        struct vm_region *pregion;
        struct rb_node **p, *parent;

        validate_nommu_regions();

        parent = NULL;
        p = &nommu_region_tree.rb_node;
        while (*p) {
                parent = *p;
                pregion = rb_entry(parent, struct vm_region, vm_rb);
                if (region->vm_start < pregion->vm_start)
                        p = &(*p)->rb_left;
                else if (region->vm_start > pregion->vm_start)
                        p = &(*p)->rb_right;
                else if (pregion == region)
                        return;
                else
                        BUG();
        }

        rb_link_node(&region->vm_rb, parent, p);
        rb_insert_color(&region->vm_rb, &nommu_region_tree);

        validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
        BUG_ON(!nommu_region_tree.rb_node);

        validate_nommu_regions();
        rb_erase(&region->vm_rb, &nommu_region_tree);
        validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
        for (; from < to; from += PAGE_SIZE) {
                struct page *page = virt_to_page((void *)from);

                atomic_long_dec(&mmap_pages_allocated);
                put_page(page);
        }
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
        __releases(nommu_region_sem)
{
        BUG_ON(!nommu_region_tree.rb_node);

        if (--region->vm_usage == 0) {
                if (region->vm_top > region->vm_start)
                        delete_nommu_region(region);
                up_write(&nommu_region_sem);

                if (region->vm_file)
                        fput(region->vm_file);

                /* IO memory and memory shared directly out of the pagecache
                 * from ramfs/tmpfs mustn't be released here */
                if (region->vm_flags & VM_MAPPED_COPY)
                        free_page_series(region->vm_start, region->vm_top);
                kmem_cache_free(vm_region_jar, region);
        } else {
                up_write(&nommu_region_sem);
        }
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
        down_write(&nommu_region_sem);
        __put_nommu_region(region);
}

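/*
 * attach a VMA to its mm and, for file mappings, hook it into the file's
 * address_space interval tree so reverse lookups can find it
 */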
static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
{
        vma->vm_mm = mm;

        /* add the VMA to the mapping */
        if (vma->vm_file) {
                struct address_space *mapping = vma->vm_file->f_mapping;

                i_mmap_lock_write(mapping);
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
                i_mmap_unlock_write(mapping);
        }
}

static void cleanup_vma_from_mm(struct vm_area_struct *vma)
{
        vma->vm_mm->map_count--;
        /* remove the VMA from the mapping */
        if (vma->vm_file) {
                struct address_space *mapping;
                mapping = vma->vm_file->f_mapping;

                i_mmap_lock_write(mapping);
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_remove(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
                i_mmap_unlock_write(mapping);
        }
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static int delete_vma_from_mm(struct vm_area_struct *vma)
{
        VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);

        vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
        if (vma_iter_prealloc(&vmi, vma)) {
                pr_warn("Allocation of vma tree for process %d failed\n",
                       current->pid);
                return -ENOMEM;
        }
        cleanup_vma_from_mm(vma);

        /* remove from the MM's tree and list */
        vma_iter_clear(&vmi);
        return 0;
}
/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
        if (vma->vm_file)
                fput(vma->vm_file);
        put_nommu_region(vma->vm_region);
        vm_area_free(vma);
}

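/*
 * look up the first VMA overlapping [start_addr, end_addr), NULL if none
 * - the caller must hold the mmap lock
 */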
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
                                             unsigned long start_addr,
                                             unsigned long end_addr)
{
        unsigned long index = start_addr;

        mmap_assert_locked(mm);
        return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_lock at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
        VMA_ITERATOR(vmi, mm, addr);

        return vma_iter_load(&vmi);
}
EXPORT_SYMBOL(find_vma);

/*
 * At least xtensa ends up having protection faults even with no
 * MMU.. No stack expansion, at least.
 */
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
                        unsigned long addr, struct pt_regs *regs)
{
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        vma = vma_lookup(mm, addr);
        if (!vma)
                mmap_read_unlock(mm);
        return vma;
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
{
        return -ENOMEM;
}

struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
{
        mmap_read_unlock(mm);
        return NULL;
}

/*
 * look up the VMA that exactly matches the range [addr, addr + len)
 * - should be called with mm->mmap_lock at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
                                             unsigned long addr,
                                             unsigned long len)
{
        struct vm_area_struct *vma;
        unsigned long end = addr + len;
        VMA_ITERATOR(vmi, mm, addr);

        vma = vma_iter_load(&vmi);
        if (!vma)
                return NULL;
        if (vma->vm_start != addr)
                return NULL;
        if (vma->vm_end != end)
                return NULL;

        return vma;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
                                 unsigned long addr,
                                 unsigned long len,
                                 unsigned long prot,
                                 unsigned long flags,
                                 unsigned long pgoff,
                                 unsigned long *_capabilities)
{
        unsigned long capabilities, rlen;
        int ret;

        /* do the simple checks first */
        if (flags & MAP_FIXED)
                return -EINVAL;

        if ((flags & MAP_TYPE) != MAP_PRIVATE &&
            (flags & MAP_TYPE) != MAP_SHARED)
                return -EINVAL;

        if (!len)
                return -EINVAL;

        /* Careful about overflows.. */
        rlen = PAGE_ALIGN(len);
        if (!rlen || rlen > TASK_SIZE)
                return -ENOMEM;

        /* offset overflow? */
        if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
                return -EOVERFLOW;

        if (file) {
                /* files must support mmap */
                if (!file->f_op->mmap)
                        return -ENODEV;

                /* work out if what we've got could possibly be shared
                 * - we support chardevs that provide their own "memory"
                 * - we support files/blockdevs that are memory backed
                 */
                if (file->f_op->mmap_capabilities) {
                        capabilities = file->f_op->mmap_capabilities(file);
                } else {
                        /* no explicit capabilities set, so assume some
                         * defaults */
                        switch (file_inode(file)->i_mode & S_IFMT) {
                        case S_IFREG:
                        case S_IFBLK:
                                capabilities = NOMMU_MAP_COPY;
                                break;

                        case S_IFCHR:
                                capabilities =
                                        NOMMU_MAP_DIRECT |
                                        NOMMU_MAP_READ |
                                        NOMMU_MAP_WRITE;
                                break;

                        default:
                                return -EINVAL;
                        }
                }

                /* eliminate any capabilities that we can't support on this
                 * device */
                if (!file->f_op->get_unmapped_area)
                        capabilities &= ~NOMMU_MAP_DIRECT;
                if (!(file->f_mode & FMODE_CAN_READ))
                        capabilities &= ~NOMMU_MAP_COPY;

                /* The file shall have been opened with read permission. */
                if (!(file->f_mode & FMODE_READ))
                        return -EACCES;

                if (flags & MAP_SHARED) {
                        /* do checks for writing, appending and locking */
                        if ((prot & PROT_WRITE) &&
                            !(file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        if (IS_APPEND(file_inode(file)) &&
                            (file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        if (!(capabilities & NOMMU_MAP_DIRECT))
                                return -ENODEV;

                        /* we mustn't privatise shared mappings */
                        capabilities &= ~NOMMU_MAP_COPY;
                } else {
                        /* we're going to read the file into private memory we
                         * allocate */
                        if (!(capabilities & NOMMU_MAP_COPY))
                                return -ENODEV;

                        /* we don't permit a private writable mapping to be
                         * shared with the backing device */
                        if (prot & PROT_WRITE)
                                capabilities &= ~NOMMU_MAP_DIRECT;
                }

                if (capabilities & NOMMU_MAP_DIRECT) {
                        if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
                            ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
                            ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
                            ) {
                                capabilities &= ~NOMMU_MAP_DIRECT;
                                if (flags & MAP_SHARED) {
                                        pr_warn("MAP_SHARED not completely supported on !MMU\n");
                                        return -EINVAL;
                                }
                        }
                }

                /* handle executable mappings and implied executable
                 * mappings */
                if (path_noexec(&file->f_path)) {
                        if (prot & PROT_EXEC)
                                return -EPERM;
                } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
                        /* handle implication of PROT_EXEC by PROT_READ */
                        if (current->personality & READ_IMPLIES_EXEC) {
                                if (capabilities & NOMMU_MAP_EXEC)
                                        prot |= PROT_EXEC;
                        }
                } else if ((prot & PROT_READ) &&
                         (prot & PROT_EXEC) &&
                         !(capabilities & NOMMU_MAP_EXEC)
                         ) {
                        /* backing file is not executable, try to copy */
                        capabilities &= ~NOMMU_MAP_DIRECT;
                }
        } else {
                /* anonymous mappings are always memory backed and can be
                 * privately mapped
                 */
                capabilities = NOMMU_MAP_COPY;

                /* handle PROT_EXEC implication by PROT_READ */
                if ((prot & PROT_READ) &&
                    (current->personality & READ_IMPLIES_EXEC))
                        prot |= PROT_EXEC;
        }

        /* allow the security API to have its say */
        ret = security_mmap_addr(addr);
        if (ret < 0)
                return ret;

        /* looks okay */
        *_capabilities = capabilities;
        return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
                                        unsigned long prot,
                                        unsigned long flags,
                                        unsigned long capabilities)
{
        unsigned long vm_flags;

        vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);

        if (!file) {
                /*
                 * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because
                 * there is no fork().
                 */
                vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        } else if (flags & MAP_PRIVATE) {
                /* MAP_PRIVATE file mapping */
                if (capabilities & NOMMU_MAP_DIRECT)
                        vm_flags |= (capabilities & NOMMU_VMFLAGS);
                else
                        vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

                if (!(prot & PROT_WRITE) && !current->ptrace)
                        /*
                         * R/O private file mapping which cannot be used to
                         * modify memory, especially also not via active ptrace
                         * (e.g., set breakpoints) or later by upgrading
                         * permissions (no mprotect()). We can try overlaying
                         * the file mapping, which will work e.g., on chardevs,
                         * ramfs/tmpfs/shmfs and romfs/cramfs.
                         */
                        vm_flags |= VM_MAYOVERLAY;
        } else {
                /* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */
                vm_flags |= VM_SHARED | VM_MAYSHARE |
                            (capabilities & NOMMU_VMFLAGS);
        }

        return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
        int ret;

        ret = call_mmap(vma->vm_file, vma);
        if (ret == 0) {
                vma->vm_region->vm_top = vma->vm_region->vm_end;
                return 0;
        }
        if (ret != -ENOSYS)
                return ret;

        /* getting -ENOSYS indicates that direct mmap isn't possible (as
         * opposed to tried but failed) so we can only give a suitable error as
         * it's not possible to make a private copy if MAP_SHARED was given */
        return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
                           struct vm_region *region,
                           unsigned long len,
                           unsigned long capabilities)
{
        unsigned long total, point;
        void *base;
        int ret, order;

        /*
         * Invoke the file's mapping function so that it can keep track of
         * shared mappings on devices or memory. VM_MAYOVERLAY will be set if
         * it may attempt to share, which will make is_nommu_shared_mapping()
         * happy.
         */
        if (capabilities & NOMMU_MAP_DIRECT) {
                ret = call_mmap(vma->vm_file, vma);
                /* shouldn't return success if we're not sharing */
                if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
                        ret = -ENOSYS;
                if (ret == 0) {
                        vma->vm_region->vm_top = vma->vm_region->vm_end;
                        return 0;
                }
                if (ret != -ENOSYS)
                        return ret;

                /* getting an ENOSYS error indicates that direct mmap isn't
                 * possible (as opposed to tried but failed) so we'll try to
                 * make a private copy of the data and map that instead */
        }

        /* allocate some memory to hold the mapping
         * - note that this may not return a page-aligned address if the object
         *   we're allocating is smaller than a page
         */
        order = get_order(len);
        total = 1 << order;
        point = len >> PAGE_SHIFT;

        /* we don't want to allocate a power-of-2 sized page set */
        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
                total = point;

        base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
        if (!base)
                goto enomem;

        atomic_long_add(total, &mmap_pages_allocated);

        vm_flags_set(vma, VM_MAPPED_COPY);
        region->vm_flags = vma->vm_flags;
        region->vm_start = (unsigned long) base;
        region->vm_end   = region->vm_start + len;
        region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

        vma->vm_start = region->vm_start;
        vma->vm_end   = region->vm_start + len;

        if (vma->vm_file) {
                /* read the contents of a file into the copy */
                loff_t fpos;

                fpos = vma->vm_pgoff;
                fpos <<= PAGE_SHIFT;

                ret = kernel_read(vma->vm_file, base, len, &fpos);
                if (ret < 0)
                        goto error_free;

                /* clear the last little bit */
                if (ret < len)
                        memset(base + ret, 0, len - ret);

        } else {
                vma_set_anonymous(vma);
        }

        return 0;

error_free:
        free_page_series(region->vm_start, region->vm_top);
        region->vm_start = vma->vm_start = 0;
        region->vm_end   = vma->vm_end = 0;
        region->vm_top   = 0;
        return ret;

enomem:
        pr_err("Allocation of length %lu from process %d (%s) failed\n",
               len, current->pid, current->comm);
        show_mem();
        return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap(struct file *file,
                        unsigned long addr,
                        unsigned long len,
                        unsigned long prot,
                        unsigned long flags,
                        vm_flags_t vm_flags,
                        unsigned long pgoff,
                        unsigned long *populate,
                        struct list_head *uf)
{
        struct vm_area_struct *vma;
        struct vm_region *region;
        struct rb_node *rb;
        unsigned long capabilities, result;
        int ret;
        VMA_ITERATOR(vmi, current->mm, 0);

        *populate = 0;

        /* decide whether we should attempt the mapping, and if so what sort of
         * mapping */
        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
                                    &capabilities);
        if (ret < 0)
                return ret;

        /* we ignore the address hint */
        addr = 0;
        len = PAGE_ALIGN(len);

        /* we've determined that we can make the mapping, now translate what we
         * now know into VMA flags */
        vm_flags |= determine_vm_flags(file, prot, flags, capabilities);

        /* we're going to need to record the mapping */
        region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
        if (!region)
                goto error_getting_region;

        vma = vm_area_alloc(current->mm);
        if (!vma)
                goto error_getting_vma;

        region->vm_usage = 1;
        region->vm_flags = vm_flags;
        region->vm_pgoff = pgoff;

        vm_flags_init(vma, vm_flags);
        vma->vm_pgoff = pgoff;

        if (file) {
                region->vm_file = get_file(file);
                vma->vm_file = get_file(file);
        }

        down_write(&nommu_region_sem);

        /* if we want to share, we need to check for regions created by other
         * mmap() calls that overlap with our proposed mapping
         * - we can only share with a superset match on most regular files
         * - shared mappings on character devices and memory backed files are
         *   permitted to overlap inexactly as far as we are concerned; in
         *   these cases, sharing is handled in the driver or filesystem rather
         *   than here
         */
        if (is_nommu_shared_mapping(vm_flags)) {
                struct vm_region *pregion;
                unsigned long pglen, rpglen, pgend, rpgend, start;

                pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                pgend = pgoff + pglen;

                for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
                        pregion = rb_entry(rb, struct vm_region, vm_rb);

                        if (!is_nommu_shared_mapping(pregion->vm_flags))
                                continue;

                        /* search for overlapping mappings on the same file */
                        if (file_inode(pregion->vm_file) !=
                            file_inode(file))
                                continue;

                        if (pregion->vm_pgoff >= pgend)
                                continue;

                        rpglen = pregion->vm_end - pregion->vm_start;
                        rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
                        rpgend = pregion->vm_pgoff + rpglen;
                        if (pgoff >= rpgend)
                                continue;

                        /* handle inexactly overlapping matches between
                         * mappings */
                        if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
                            !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
                                /* new mapping is not a subset of the region */
                                if (!(capabilities & NOMMU_MAP_DIRECT))
                                        goto sharing_violation;
                                continue;
                        }

                        /* we've found a region we can share */
                        pregion->vm_usage++;
                        vma->vm_region = pregion;
                        start = pregion->vm_start;
                        start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
                        vma->vm_start = start;
                        vma->vm_end = start + len;

                        if (pregion->vm_flags & VM_MAPPED_COPY)
                                vm_flags_set(vma, VM_MAPPED_COPY);
                        else {
                                ret = do_mmap_shared_file(vma);
                                if (ret < 0) {
                                        vma->vm_region = NULL;
                                        vma->vm_start = 0;
                                        vma->vm_end = 0;
                                        pregion->vm_usage--;
                                        pregion = NULL;
                                        goto error_just_free;
                                }
                        }
                        fput(region->vm_file);
                        kmem_cache_free(vm_region_jar, region);
                        region = pregion;
                        result = start;
                        goto share;
                }

                /* obtain the address at which to make a shared mapping
                 * - this is the hook for quasi-memory character devices to
                 *   tell us the location of a shared mapping
                 */
                if (capabilities & NOMMU_MAP_DIRECT) {
                        addr = file->f_op->get_unmapped_area(file, addr, len,
                                                             pgoff, flags);
                        if (IS_ERR_VALUE(addr)) {
                                ret = addr;
                                if (ret != -ENOSYS)
                                        goto error_just_free;

                                /* the driver refused to tell us where to site
                                 * the mapping so we'll have to attempt to copy
                                 * it */
                                ret = -ENODEV;
                                if (!(capabilities & NOMMU_MAP_COPY))
                                        goto error_just_free;

                                capabilities &= ~NOMMU_MAP_DIRECT;
                        } else {
                                vma->vm_start = region->vm_start = addr;
                                vma->vm_end = region->vm_end = addr + len;
                        }
                }
        }

        vma->vm_region = region;

        /* set up the mapping
         * - the region is filled in if NOMMU_MAP_DIRECT is still set
         */
        if (file && vma->vm_flags & VM_SHARED)
                ret = do_mmap_shared_file(vma);
        else
                ret = do_mmap_private(vma, region, len, capabilities);
        if (ret < 0)
                goto error_just_free;
        add_nommu_region(region);

        /* clear anonymous mappings that don't ask for uninitialized data */
        if (!vma->vm_file &&
            (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
             !(flags & MAP_UNINITIALIZED)))
                memset((void *)region->vm_start, 0,
                       region->vm_end - region->vm_start);

        /* okay... we have a mapping; now we have to register it */
        result = vma->vm_start;

        current->mm->total_vm += len >> PAGE_SHIFT;

share:
        BUG_ON(!vma->vm_region);
        vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
        if (vma_iter_prealloc(&vmi, vma))
                goto error_just_free;

        setup_vma_to_mm(vma, current->mm);
        current->mm->map_count++;
        /* add the VMA to the tree */
        vma_iter_store(&vmi, vma);

        /* we flush the region from the icache only when the first executable
         * mapping of it is made */
        if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
                flush_icache_user_range(region->vm_start, region->vm_end);
                region->vm_icache_flushed = true;
        }

        up_write(&nommu_region_sem);

        return result;

error_just_free:
        up_write(&nommu_region_sem);
error:
        vma_iter_free(&vmi);
        if (region->vm_file)
                fput(region->vm_file);
        kmem_cache_free(vm_region_jar, region);
        if (vma->vm_file)
                fput(vma->vm_file);
        vm_area_free(vma);
        return ret;

sharing_violation:
        up_write(&nommu_region_sem);
        pr_warn("Attempt to share mismatched mappings\n");
        ret = -EINVAL;
        goto error;

error_getting_vma:
        kmem_cache_free(vm_region_jar, region);
        pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
                        len, current->pid);
        show_mem();
        return -ENOMEM;

error_getting_region:
        pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
                        len, current->pid);
        show_mem();
        return -ENOMEM;
}

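/*
 * mmap() entry point: resolve the fd (unless MAP_ANONYMOUS) and pass the
 * request on to vm_mmap_pgoff(), dropping the file reference afterwards
 */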
1240 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1241                               unsigned long prot, unsigned long flags,
1242                               unsigned long fd, unsigned long pgoff)
1243 {
1244         struct file *file = NULL;
1245         unsigned long retval = -EBADF;
1246
1247         audit_mmap_fd(fd, flags);
1248         if (!(flags & MAP_ANONYMOUS)) {
1249                 file = fget(fd);
1250                 if (!file)
1251                         goto out;
1252         }
1253
1254         retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1255
1256         if (file)
1257                 fput(file);
1258 out:
1259         return retval;
1260 }
1261
1262 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1263                 unsigned long, prot, unsigned long, flags,
1264                 unsigned long, fd, unsigned long, pgoff)
1265 {
1266         return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1267 }
1268
1269 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1270 struct mmap_arg_struct {
1271         unsigned long addr;
1272         unsigned long len;
1273         unsigned long prot;
1274         unsigned long flags;
1275         unsigned long fd;
1276         unsigned long offset;
1277 };
1278
1279 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1280 {
1281         struct mmap_arg_struct a;
1282
1283         if (copy_from_user(&a, arg, sizeof(a)))
1284                 return -EFAULT;
1285         if (offset_in_page(a.offset))
1286                 return -EINVAL;
1287
1288         return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1289                                a.offset >> PAGE_SHIFT);
1290 }
1291 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1292
1293 /*
1294  * split a vma into two pieces at address 'addr', a new vma is allocated either
1295  * for the first part or the tail.
1296  */
1297 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
1298                      unsigned long addr, int new_below)
1299 {
1300         struct vm_area_struct *new;
1301         struct vm_region *region;
1302         unsigned long npages;
1303         struct mm_struct *mm;
1304
1305         /* we're only permitted to split anonymous regions (these should have
1306          * only a single usage on the region) */
1307         if (vma->vm_file)
1308                 return -ENOMEM;
1309
1310         mm = vma->vm_mm;
1311         if (mm->map_count >= sysctl_max_map_count)
1312                 return -ENOMEM;
1313
1314         region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1315         if (!region)
1316                 return -ENOMEM;
1317
1318         new = vm_area_dup(vma);
1319         if (!new)
1320                 goto err_vma_dup;
1321
1322         /* most fields are the same, copy all, and then fixup */
1323         *region = *vma->vm_region;
1324         new->vm_region = region;
1325
1326         npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1327
1328         if (new_below) {
1329                 region->vm_top = region->vm_end = new->vm_end = addr;
1330         } else {
1331                 region->vm_start = new->vm_start = addr;
1332                 region->vm_pgoff = new->vm_pgoff += npages;
1333         }
1334
1335         vma_iter_config(vmi, new->vm_start, new->vm_end);
1336         if (vma_iter_prealloc(vmi, vma)) {
1337                 pr_warn("Allocation of vma tree for process %d failed\n",
1338                         current->pid);
1339                 goto err_vmi_preallocate;
1340         }
1341
1342         if (new->vm_ops && new->vm_ops->open)
1343                 new->vm_ops->open(new);
1344
1345         down_write(&nommu_region_sem);
1346         delete_nommu_region(vma->vm_region);
1347         if (new_below) {
1348                 vma->vm_region->vm_start = vma->vm_start = addr;
1349                 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1350         } else {
1351                 vma->vm_region->vm_end = vma->vm_end = addr;
1352                 vma->vm_region->vm_top = addr;
1353         }
1354         add_nommu_region(vma->vm_region);
1355         add_nommu_region(new->vm_region);
1356         up_write(&nommu_region_sem);
1357
1358         setup_vma_to_mm(vma, mm);
1359         setup_vma_to_mm(new, mm);
1360         vma_iter_store(vmi, new);
1361         mm->map_count++;
1362         return 0;
1363
1364 err_vmi_preallocate:
1365         vm_area_free(new);
1366 err_vma_dup:
1367         kmem_cache_free(vm_region_jar, region);
1368         return -ENOMEM;
1369 }
1370
1371 /*
1372  * shrink a VMA by removing the specified chunk from either the beginning or
1373  * the end
1374  */
1375 static int vmi_shrink_vma(struct vma_iterator *vmi,
1376                       struct vm_area_struct *vma,
1377                       unsigned long from, unsigned long to)
1378 {
1379         struct vm_region *region;
1380
1381         /* adjust the VMA's pointers, which may reposition it in the MM's tree
1382          * and list */
1383         if (from > vma->vm_start) {
1384                 if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
1385                         return -ENOMEM;
1386                 vma->vm_end = from;
1387         } else {
1388                 if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
1389                         return -ENOMEM;
1390                 vma->vm_start = to;
1391         }
1392
1393         /* cut the backing region down to size */
1394         region = vma->vm_region;
1395         BUG_ON(region->vm_usage != 1);
1396
1397         down_write(&nommu_region_sem);
1398         delete_nommu_region(region);
1399         if (from > region->vm_start) {
1400                 to = region->vm_top;
1401                 region->vm_top = region->vm_end = from;
1402         } else {
1403                 region->vm_start = to;
1404         }
1405         add_nommu_region(region);
1406         up_write(&nommu_region_sem);
1407
1408         free_page_series(from, to);
1409         return 0;
1410 }
1411
1412 /*
1413  * release a mapping
1414  * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1415  *   VMA, though it need not cover the whole VMA
1416  */
1417 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1418 {
1419         VMA_ITERATOR(vmi, mm, start);
1420         struct vm_area_struct *vma;
1421         unsigned long end;
1422         int ret = 0;
1423
1424         len = PAGE_ALIGN(len);
1425         if (len == 0)
1426                 return -EINVAL;
1427
1428         end = start + len;
1429
1430         /* find the first potentially overlapping VMA */
1431         vma = vma_find(&vmi, end);
1432         if (!vma) {
1433                 static int limit;
1434                 if (limit < 5) {
1435                         pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1436                                         current->pid, current->comm,
1437                                         start, start + len - 1);
1438                         limit++;
1439                 }
1440                 return -EINVAL;
1441         }
1442
1443         /* we're allowed to split an anonymous VMA but not a file-backed one */
1444         if (vma->vm_file) {
1445                 do {
1446                         if (start > vma->vm_start)
1447                                 return -EINVAL;
1448                         if (end == vma->vm_end)
1449                                 goto erase_whole_vma;
1450                         vma = vma_find(&vmi, end);
1451                 } while (vma);
1452                 return -EINVAL;
1453         } else {
1454                 /* the chunk must be a subset of the VMA found */
1455                 if (start == vma->vm_start && end == vma->vm_end)
1456                         goto erase_whole_vma;
1457                 if (start < vma->vm_start || end > vma->vm_end)
1458                         return -EINVAL;
1459                 if (offset_in_page(start))
1460                         return -EINVAL;
1461                 if (end != vma->vm_end && offset_in_page(end))
1462                         return -EINVAL;
1463                 if (start != vma->vm_start && end != vma->vm_end) {
1464                         ret = split_vma(&vmi, vma, start, 1);
1465                         if (ret < 0)
1466                                 return ret;
1467                 }
1468                 return vmi_shrink_vma(&vmi, vma, start, end);
1469         }
1470
1471 erase_whole_vma:
1472         if (delete_vma_from_mm(vma))
1473                 ret = -ENOMEM;
1474         else
1475                 delete_vma(mm, vma);
1476         return ret;
1477 }
1478
1479 int vm_munmap(unsigned long addr, size_t len)
1480 {
1481         struct mm_struct *mm = current->mm;
1482         int ret;
1483
1484         mmap_write_lock(mm);
1485         ret = do_munmap(mm, addr, len, NULL);
1486         mmap_write_unlock(mm);
1487         return ret;
1488 }
1489 EXPORT_SYMBOL(vm_munmap);
1490
1491 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1492 {
1493         return vm_munmap(addr, len);
1494 }
1495
1496 /*
1497  * release all the mappings made in a process's VM space
1498  */
1499 void exit_mmap(struct mm_struct *mm)
1500 {
1501         VMA_ITERATOR(vmi, mm, 0);
1502         struct vm_area_struct *vma;
1503
1504         if (!mm)
1505                 return;
1506
1507         mm->total_vm = 0;
1508
1509         /*
1510          * Lock the mm to avoid assert complaining even though this is the only
1511          * user of the mm
1512          */
1513         mmap_write_lock(mm);
1514         for_each_vma(vmi, vma) {
1515                 cleanup_vma_from_mm(vma);
1516                 delete_vma(mm, vma);
1517                 cond_resched();
1518         }
1519         __mt_destroy(&mm->mm_mt);
1520         mmap_write_unlock(mm);
1521 }
1522
1523 /*
1524  * expand (or shrink) an existing mapping, potentially moving it at the same
1525  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1526  *
1527  * under NOMMU conditions, we only permit changing a mapping's size, and only
1528  * as long as it stays within the region allocated by do_mmap_private() and the
1529  * block is not shareable
1530  *
1531  * MREMAP_FIXED is not supported under NOMMU conditions
1532  */
1533 static unsigned long do_mremap(unsigned long addr,
1534                         unsigned long old_len, unsigned long new_len,
1535                         unsigned long flags, unsigned long new_addr)
1536 {
1537         struct vm_area_struct *vma;
1538
1539         /* insanity checks first */
1540         old_len = PAGE_ALIGN(old_len);
1541         new_len = PAGE_ALIGN(new_len);
1542         if (old_len == 0 || new_len == 0)
1543                 return (unsigned long) -EINVAL;
1544
1545         if (offset_in_page(addr))
1546                 return -EINVAL;
1547
1548         if (flags & MREMAP_FIXED && new_addr != addr)
1549                 return (unsigned long) -EINVAL;
1550
1551         vma = find_vma_exact(current->mm, addr, old_len);
1552         if (!vma)
1553                 return (unsigned long) -EINVAL;
1554
1555         if (vma->vm_end != vma->vm_start + old_len)
1556                 return (unsigned long) -EFAULT;
1557
1558         if (is_nommu_shared_mapping(vma->vm_flags))
1559                 return (unsigned long) -EPERM;
1560
1561         if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1562                 return (unsigned long) -ENOMEM;
1563
1564         /* all checks complete - do it */
1565         vma->vm_end = vma->vm_start + new_len;
1566         return vma->vm_start;
1567 }
1568
1569 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1570                 unsigned long, new_len, unsigned long, flags,
1571                 unsigned long, new_addr)
1572 {
1573         unsigned long ret;
1574
1575         mmap_write_lock(current->mm);
1576         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1577         mmap_write_unlock(current->mm);
1578         return ret;
1579 }
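/*
 * Illustrative userspace sketch (assumes <sys/mman.h>): under NOMMU,
 * mremap() can only resize a mapping in place, and only within the region
 * originally allocated by do_mmap_private(); the mapping never moves, and
 * MREMAP_FIXED to a different address is rejected.
 */
static void *example_resize(void *p, size_t old_len, size_t new_len)
{
	void *q = mremap(p, old_len, new_len, 0);	/* no MREMAP_MAYMOVE */

	return q == MAP_FAILED ? NULL : q;	/* on success, q == p */
}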
1580
1581 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1582                 unsigned long pfn, unsigned long size, pgprot_t prot)
1583 {
1584         if (addr != (pfn << PAGE_SHIFT))
1585                 return -EINVAL;
1586
1587         vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1588         return 0;
1589 }
1590 EXPORT_SYMBOL(remap_pfn_range);
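/*
 * Illustrative sketch: a driver ->mmap() handler on NOMMU.  Because there
 * is no MMU, remap_pfn_range() succeeds only for the identity mapping in
 * which the user address equals pfn << PAGE_SHIFT; it merely tags the VMA.
 * The device name is hypothetical.
 */
static int exampledev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = vma->vm_start >> PAGE_SHIFT;

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}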
1591
1592 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1593 {
1594         unsigned long pfn = start >> PAGE_SHIFT;
1595         unsigned long vm_len = vma->vm_end - vma->vm_start;
1596
1597         pfn += vma->vm_pgoff;
1598         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1599 }
1600 EXPORT_SYMBOL(vm_iomap_memory);
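/*
 * Illustrative sketch: vm_iomap_memory() is the convenience form of the
 * above - it derives the pfn and length from the VMA itself, so a driver
 * only supplies the physical window.  The base and size constants below
 * are hypothetical.
 */
static int exampledev_mmap_mmio(struct file *file, struct vm_area_struct *vma)
{
	return vm_iomap_memory(vma, EXAMPLEDEV_MMIO_BASE,
			       EXAMPLEDEV_MMIO_SIZE);
}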
1601
1602 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1603                         unsigned long pgoff)
1604 {
1605         unsigned int size = vma->vm_end - vma->vm_start;
1606
1607         if (!(vma->vm_flags & VM_USERMAP))
1608                 return -EINVAL;
1609
1610         vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1611         vma->vm_end = vma->vm_start + size;
1612
1613         return 0;
1614 }
1615 EXPORT_SYMBOL(remap_vmalloc_range);
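/*
 * Illustrative sketch: sharing a vmalloc'd buffer with userspace.  This
 * assumes the buffer came from vmalloc_user() and that the NOMMU mmap path
 * has marked the VMA with VM_USERMAP, as remap_vmalloc_range() requires;
 * the driver shape is hypothetical.
 */
static int exampledev_mmap_buf(struct file *file, struct vm_area_struct *vma)
{
	void *buf = file->private_data;	/* from vmalloc_user() */

	return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
}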
1616
1617 vm_fault_t filemap_fault(struct vm_fault *vmf)
1618 {
1619         BUG();
1620         return 0;
1621 }
1622 EXPORT_SYMBOL(filemap_fault);
1623
1624 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1625                 pgoff_t start_pgoff, pgoff_t end_pgoff)
1626 {
1627         BUG();
1628         return 0;
1629 }
1630 EXPORT_SYMBOL(filemap_map_pages);
1631
1632 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
1633                               void *buf, int len, unsigned int gup_flags)
1634 {
1635         struct vm_area_struct *vma;
1636         int write = gup_flags & FOLL_WRITE;
1637
1638         if (mmap_read_lock_killable(mm))
1639                 return 0;
1640
1641         /* the access must start within one of the target process's mappings */
1642         vma = find_vma(mm, addr);
1643         if (vma) {
1644                 /* don't overrun this mapping */
1645                 if (addr + len >= vma->vm_end)
1646                         len = vma->vm_end - addr;
1647
1648                 /* only read or write mappings where it is permitted */
1649                 if (write && vma->vm_flags & VM_MAYWRITE)
1650                         copy_to_user_page(vma, NULL, addr,
1651                                          (void *) addr, buf, len);
1652                 else if (!write && vma->vm_flags & VM_MAYREAD)
1653                         copy_from_user_page(vma, NULL, addr,
1654                                             buf, (void *) addr, len);
1655                 else
1656                         len = 0;
1657         } else {
1658                 len = 0;
1659         }
1660
1661         mmap_read_unlock(mm);
1662
1663         return len;
1664 }
1665
1666 /**
1667  * access_remote_vm - access another process' address space
1668  * @mm:         the mm_struct of the target address space
1669  * @addr:       start address to access
1670  * @buf:        source or destination buffer
1671  * @len:        number of bytes to transfer
1672  * @gup_flags:  flags modifying lookup behaviour
1673  *
1674  * The caller must hold a reference on @mm.
1675  */
1676 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1677                 void *buf, int len, unsigned int gup_flags)
1678 {
1679         return __access_remote_vm(mm, addr, buf, len, gup_flags);
1680 }
1681
1682 /*
1683  * Access another process' address space.
1684  * - source/target buffer must be kernel space
1685  */
1686 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1687                 unsigned int gup_flags)
1688 {
1689         struct mm_struct *mm;
1690
1691         if (addr + len < addr)
1692                 return 0;
1693
1694         mm = get_task_mm(tsk);
1695         if (!mm)
1696                 return 0;
1697
1698         len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1699
1700         mmput(mm);
1701         return len;
1702 }
1703 EXPORT_SYMBOL_GPL(access_process_vm);
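/*
 * Illustrative sketch: reading a word from another task's address space,
 * much as the ptrace peek path does.  The helper name is hypothetical.
 */
static int example_peek_word(struct task_struct *tsk, unsigned long addr,
			     unsigned long *val)
{
	int copied = access_process_vm(tsk, addr, val, sizeof(*val),
				       FOLL_FORCE);

	return copied == sizeof(*val) ? 0 : -EIO;
}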
1704
1705 /**
1706  * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1707  * @inode: The inode to check
1708  * @size: The current filesize of the inode
1709  * @newsize: The proposed filesize of the inode
1710  *
1711  * Check the shared mappings on an inode on behalf of a shrinking truncate to
1712  * make sure that any outstanding VMAs aren't broken and then shrink any
1713  * vm_regions that extend beyond the new EOF so that do_mmap() doesn't
1714  * automatically grant mappings that are too large.
1715  */
1716 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1717                                 size_t newsize)
1718 {
1719         struct vm_area_struct *vma;
1720         struct vm_region *region;
1721         pgoff_t low, high;
1722         size_t r_size, r_top;
1723
1724         low = newsize >> PAGE_SHIFT;
1725         high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1726
1727         down_write(&nommu_region_sem);
1728         i_mmap_lock_read(inode->i_mapping);
1729
1730         /* search for VMAs that fall within the dead zone */
1731         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1732                 /* found one - only interested if it's shared out of the page
1733                  * cache */
1734                 if (vma->vm_flags & VM_SHARED) {
1735                         i_mmap_unlock_read(inode->i_mapping);
1736                         up_write(&nommu_region_sem);
1737                         return -ETXTBSY; /* not quite true, but near enough */
1738                 }
1739         }
1740
1741         /* reduce any regions that overlap the dead zone - if any exist,
1742          * they will be pointed to by VMAs that don't overlap the dead zone
1743          *
1744          * we don't check for any regions that start beyond the EOF as there
1745          * shouldn't be any
1746          */
1747         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1748                 if (!(vma->vm_flags & VM_SHARED))
1749                         continue;
1750
1751                 region = vma->vm_region;
1752                 r_size = region->vm_top - region->vm_start;
1753                 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1754
1755                 if (r_top > newsize) {
1756                         region->vm_top -= r_top - newsize;
1757                         if (region->vm_end > region->vm_top)
1758                                 region->vm_end = region->vm_top;
1759                 }
1760         }
1761
1762         i_mmap_unlock_read(inode->i_mapping);
1763         up_write(&nommu_region_sem);
1764         return 0;
1765 }
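/*
 * Illustrative sketch: how a filesystem's shrinking-truncate path might use
 * the helper above before actually changing i_size.  The function name is
 * hypothetical; real callers live in NOMMU-capable filesystems.
 */
static int example_shrink_file(struct inode *inode, loff_t newsize)
{
	loff_t size = i_size_read(inode);
	int ret;

	if (newsize < size) {
		ret = nommu_shrink_inode_mappings(inode, size, newsize);
		if (ret)
			return ret;	/* e.g. -ETXTBSY: shared mappings */
	}

	truncate_setsize(inode, newsize);
	return 0;
}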
1766
1767 /*
1768  * Initialise sysctl_user_reserve_kbytes.
1769  *
1770  * This is intended to prevent a single memory-hogging process, started by
1771  * a user, from leaving them unable to recover (kill the hog) in
1772  * OVERCOMMIT_NEVER mode.
1773  *
1774  * The default value is min(3% of free memory, 128MB)
1775  * 128MB is enough to recover with sshd/login, bash, and top/kill.
1776  */
1777 static int __meminit init_user_reserve(void)
1778 {
1779         unsigned long free_kbytes;
1780
1781         free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1782
1783         sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1784         return 0;
1785 }
1786 subsys_initcall(init_user_reserve);
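/*
 * Worked example: with 4GB free, free_kbytes = 4194304, and
 * free_kbytes / 32 = 131072KB = 128MB, exactly the 1UL << 17 cap; smaller
 * systems reserve ~3.125% (1/32) of free memory instead.
 */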
1787
1788 /*
1789  * Initialise sysctl_admin_reserve_kbytes.
1790  *
1791  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1792  * to log in and kill a memory hogging process.
1793  *
1794  * Systems with more than 256MB will reserve 8MB, enough to recover
1795  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1796  * only reserve 3% of free pages by default.
1797  */
1798 static int __meminit init_admin_reserve(void)
1799 {
1800         unsigned long free_kbytes;
1801
1802         free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1803
1804         sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1805         return 0;
1806 }
1807 subsys_initcall(init_admin_reserve);
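/*
 * Worked example: 1UL << 13 = 8192KB = 8MB, so the cap takes effect once
 * free memory exceeds 8MB * 32 = 256MB; below that the reserve is
 * free_kbytes / 32, i.e. ~3.125% of free memory.
 */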