]> Git Repo - linux.git/blob - mm/mmap.c
mmu_notifiers: call invalidate_range() when invalidating TLBs
[linux.git] / mm / mmap.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/mmap.c
4  *
5  * Written by obz.
6  *
7  * Address space accounting code        <[email protected]>
8  */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/backing-dev.h>
15 #include <linux/mm.h>
16 #include <linux/mm_inline.h>
17 #include <linux/shm.h>
18 #include <linux/mman.h>
19 #include <linux/pagemap.h>
20 #include <linux/swap.h>
21 #include <linux/syscalls.h>
22 #include <linux/capability.h>
23 #include <linux/init.h>
24 #include <linux/file.h>
25 #include <linux/fs.h>
26 #include <linux/personality.h>
27 #include <linux/security.h>
28 #include <linux/hugetlb.h>
29 #include <linux/shmem_fs.h>
30 #include <linux/profile.h>
31 #include <linux/export.h>
32 #include <linux/mount.h>
33 #include <linux/mempolicy.h>
34 #include <linux/rmap.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/mmdebug.h>
37 #include <linux/perf_event.h>
38 #include <linux/audit.h>
39 #include <linux/khugepaged.h>
40 #include <linux/uprobes.h>
41 #include <linux/notifier.h>
42 #include <linux/memory.h>
43 #include <linux/printk.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/moduleparam.h>
46 #include <linux/pkeys.h>
47 #include <linux/oom.h>
48 #include <linux/sched/mm.h>
49 #include <linux/ksm.h>
50
51 #include <linux/uaccess.h>
52 #include <asm/cacheflush.h>
53 #include <asm/tlb.h>
54 #include <asm/mmu_context.h>
55
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/mmap.h>
58
59 #include "internal.h"
60
61 #ifndef arch_mmap_check
62 #define arch_mmap_check(addr, len, flags)       (0)
63 #endif
64
65 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
66 const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
67 const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
68 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
69 #endif
70 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
71 const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
72 const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
73 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
74 #endif
75
76 static bool ignore_rlimit_data;
77 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
78
79 static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
80                 struct vm_area_struct *vma, struct vm_area_struct *prev,
81                 struct vm_area_struct *next, unsigned long start,
82                 unsigned long end, bool mm_wr_locked);
83
84 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
85 {
86         return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
87 }
88
89 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
90 void vma_set_page_prot(struct vm_area_struct *vma)
91 {
92         unsigned long vm_flags = vma->vm_flags;
93         pgprot_t vm_page_prot;
94
95         vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
96         if (vma_wants_writenotify(vma, vm_page_prot)) {
97                 vm_flags &= ~VM_SHARED;
98                 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
99         }
100         /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
101         WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
102 }
103
104 /*
105  * Requires inode->i_mapping->i_mmap_rwsem
106  */
107 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
108                 struct file *file, struct address_space *mapping)
109 {
110         if (vma->vm_flags & VM_SHARED)
111                 mapping_unmap_writable(mapping);
112
113         flush_dcache_mmap_lock(mapping);
114         vma_interval_tree_remove(vma, &mapping->i_mmap);
115         flush_dcache_mmap_unlock(mapping);
116 }
117
118 /*
119  * Unlink a file-based vm structure from its interval tree, to hide
120  * vma from rmap and vmtruncate before freeing its page tables.
121  */
122 void unlink_file_vma(struct vm_area_struct *vma)
123 {
124         struct file *file = vma->vm_file;
125
126         if (file) {
127                 struct address_space *mapping = file->f_mapping;
128                 i_mmap_lock_write(mapping);
129                 __remove_shared_vm_struct(vma, file, mapping);
130                 i_mmap_unlock_write(mapping);
131         }
132 }
133
134 /*
135  * Close a vm structure and free it.
136  */
137 static void remove_vma(struct vm_area_struct *vma, bool unreachable)
138 {
139         might_sleep();
140         if (vma->vm_ops && vma->vm_ops->close)
141                 vma->vm_ops->close(vma);
142         if (vma->vm_file)
143                 fput(vma->vm_file);
144         mpol_put(vma_policy(vma));
145         if (unreachable)
146                 __vm_area_free(vma);
147         else
148                 vm_area_free(vma);
149 }
150
151 static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
152                                                     unsigned long min)
153 {
154         return mas_prev(&vmi->mas, min);
155 }
156
157 static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
158                         unsigned long start, unsigned long end, gfp_t gfp)
159 {
160         vmi->mas.index = start;
161         vmi->mas.last = end - 1;
162         mas_store_gfp(&vmi->mas, NULL, gfp);
163         if (unlikely(mas_is_err(&vmi->mas)))
164                 return -ENOMEM;
165
166         return 0;
167 }
168
169 /*
170  * check_brk_limits() - Use platform specific check of range & verify mlock
171  * limits.
172  * @addr: The address to check
173  * @len: The size of increase.
174  *
175  * Return: 0 on success.
176  */
177 static int check_brk_limits(unsigned long addr, unsigned long len)
178 {
179         unsigned long mapped_addr;
180
181         mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
182         if (IS_ERR_VALUE(mapped_addr))
183                 return mapped_addr;
184
185         return mlock_future_ok(current->mm, current->mm->def_flags, len)
186                 ? 0 : -EAGAIN;
187 }
188 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
189                 unsigned long addr, unsigned long request, unsigned long flags);
190 SYSCALL_DEFINE1(brk, unsigned long, brk)
191 {
192         unsigned long newbrk, oldbrk, origbrk;
193         struct mm_struct *mm = current->mm;
194         struct vm_area_struct *brkvma, *next = NULL;
195         unsigned long min_brk;
196         bool populate = false;
197         LIST_HEAD(uf);
198         struct vma_iterator vmi;
199
200         if (mmap_write_lock_killable(mm))
201                 return -EINTR;
202
203         origbrk = mm->brk;
204
205 #ifdef CONFIG_COMPAT_BRK
206         /*
207          * CONFIG_COMPAT_BRK can still be overridden by setting
208          * randomize_va_space to 2, which will still cause mm->start_brk
209          * to be arbitrarily shifted
210          */
211         if (current->brk_randomized)
212                 min_brk = mm->start_brk;
213         else
214                 min_brk = mm->end_data;
215 #else
216         min_brk = mm->start_brk;
217 #endif
218         if (brk < min_brk)
219                 goto out;
220
221         /*
222          * Check against rlimit here. If this check is done later after the test
223          * of oldbrk with newbrk then it can escape the test and let the data
224          * segment grow beyond its set limit the in case where the limit is
225          * not page aligned -Ram Gupta
226          */
227         if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
228                               mm->end_data, mm->start_data))
229                 goto out;
230
231         newbrk = PAGE_ALIGN(brk);
232         oldbrk = PAGE_ALIGN(mm->brk);
233         if (oldbrk == newbrk) {
234                 mm->brk = brk;
235                 goto success;
236         }
237
238         /* Always allow shrinking brk. */
239         if (brk <= mm->brk) {
240                 /* Search one past newbrk */
241                 vma_iter_init(&vmi, mm, newbrk);
242                 brkvma = vma_find(&vmi, oldbrk);
243                 if (!brkvma || brkvma->vm_start >= oldbrk)
244                         goto out; /* mapping intersects with an existing non-brk vma. */
245                 /*
246                  * mm->brk must be protected by write mmap_lock.
247                  * do_vma_munmap() will drop the lock on success,  so update it
248                  * before calling do_vma_munmap().
249                  */
250                 mm->brk = brk;
251                 if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
252                         goto out;
253
254                 goto success_unlocked;
255         }
256
257         if (check_brk_limits(oldbrk, newbrk - oldbrk))
258                 goto out;
259
260         /*
261          * Only check if the next VMA is within the stack_guard_gap of the
262          * expansion area
263          */
264         vma_iter_init(&vmi, mm, oldbrk);
265         next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
266         if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
267                 goto out;
268
269         brkvma = vma_prev_limit(&vmi, mm->start_brk);
270         /* Ok, looks good - let it rip. */
271         if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
272                 goto out;
273
274         mm->brk = brk;
275         if (mm->def_flags & VM_LOCKED)
276                 populate = true;
277
278 success:
279         mmap_write_unlock(mm);
280 success_unlocked:
281         userfaultfd_unmap_complete(mm, &uf);
282         if (populate)
283                 mm_populate(oldbrk, newbrk - oldbrk);
284         return brk;
285
286 out:
287         mm->brk = origbrk;
288         mmap_write_unlock(mm);
289         return origbrk;
290 }
291
292 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
293 static void validate_mm(struct mm_struct *mm)
294 {
295         int bug = 0;
296         int i = 0;
297         struct vm_area_struct *vma;
298         VMA_ITERATOR(vmi, mm, 0);
299
300         mt_validate(&mm->mm_mt);
301         for_each_vma(vmi, vma) {
302 #ifdef CONFIG_DEBUG_VM_RB
303                 struct anon_vma *anon_vma = vma->anon_vma;
304                 struct anon_vma_chain *avc;
305 #endif
306                 unsigned long vmi_start, vmi_end;
307                 bool warn = 0;
308
309                 vmi_start = vma_iter_addr(&vmi);
310                 vmi_end = vma_iter_end(&vmi);
311                 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
312                         warn = 1;
313
314                 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
315                         warn = 1;
316
317                 if (warn) {
318                         pr_emerg("issue in %s\n", current->comm);
319                         dump_stack();
320                         dump_vma(vma);
321                         pr_emerg("tree range: %px start %lx end %lx\n", vma,
322                                  vmi_start, vmi_end - 1);
323                         vma_iter_dump_tree(&vmi);
324                 }
325
326 #ifdef CONFIG_DEBUG_VM_RB
327                 if (anon_vma) {
328                         anon_vma_lock_read(anon_vma);
329                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
330                                 anon_vma_interval_tree_verify(avc);
331                         anon_vma_unlock_read(anon_vma);
332                 }
333 #endif
334                 i++;
335         }
336         if (i != mm->map_count) {
337                 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
338                 bug = 1;
339         }
340         VM_BUG_ON_MM(bug, mm);
341 }
342
343 #else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
344 #define validate_mm(mm) do { } while (0)
345 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
346
347 /*
348  * vma has some anon_vma assigned, and is already inserted on that
349  * anon_vma's interval trees.
350  *
351  * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
352  * vma must be removed from the anon_vma's interval trees using
353  * anon_vma_interval_tree_pre_update_vma().
354  *
355  * After the update, the vma will be reinserted using
356  * anon_vma_interval_tree_post_update_vma().
357  *
358  * The entire update must be protected by exclusive mmap_lock and by
359  * the root anon_vma's mutex.
360  */
361 static inline void
362 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
363 {
364         struct anon_vma_chain *avc;
365
366         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
367                 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
368 }
369
370 static inline void
371 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
372 {
373         struct anon_vma_chain *avc;
374
375         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
376                 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
377 }
378
379 static unsigned long count_vma_pages_range(struct mm_struct *mm,
380                 unsigned long addr, unsigned long end)
381 {
382         VMA_ITERATOR(vmi, mm, addr);
383         struct vm_area_struct *vma;
384         unsigned long nr_pages = 0;
385
386         for_each_vma_range(vmi, vma, end) {
387                 unsigned long vm_start = max(addr, vma->vm_start);
388                 unsigned long vm_end = min(end, vma->vm_end);
389
390                 nr_pages += PHYS_PFN(vm_end - vm_start);
391         }
392
393         return nr_pages;
394 }
395
396 static void __vma_link_file(struct vm_area_struct *vma,
397                             struct address_space *mapping)
398 {
399         if (vma->vm_flags & VM_SHARED)
400                 mapping_allow_writable(mapping);
401
402         flush_dcache_mmap_lock(mapping);
403         vma_interval_tree_insert(vma, &mapping->i_mmap);
404         flush_dcache_mmap_unlock(mapping);
405 }
406
407 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
408 {
409         VMA_ITERATOR(vmi, mm, 0);
410         struct address_space *mapping = NULL;
411
412         if (vma_iter_prealloc(&vmi))
413                 return -ENOMEM;
414
415         vma_iter_store(&vmi, vma);
416
417         if (vma->vm_file) {
418                 mapping = vma->vm_file->f_mapping;
419                 i_mmap_lock_write(mapping);
420                 __vma_link_file(vma, mapping);
421                 i_mmap_unlock_write(mapping);
422         }
423
424         mm->map_count++;
425         validate_mm(mm);
426         return 0;
427 }
428
429 /*
430  * init_multi_vma_prep() - Initializer for struct vma_prepare
431  * @vp: The vma_prepare struct
432  * @vma: The vma that will be altered once locked
433  * @next: The next vma if it is to be adjusted
434  * @remove: The first vma to be removed
435  * @remove2: The second vma to be removed
436  */
437 static inline void init_multi_vma_prep(struct vma_prepare *vp,
438                 struct vm_area_struct *vma, struct vm_area_struct *next,
439                 struct vm_area_struct *remove, struct vm_area_struct *remove2)
440 {
441         memset(vp, 0, sizeof(struct vma_prepare));
442         vp->vma = vma;
443         vp->anon_vma = vma->anon_vma;
444         vp->remove = remove;
445         vp->remove2 = remove2;
446         vp->adj_next = next;
447         if (!vp->anon_vma && next)
448                 vp->anon_vma = next->anon_vma;
449
450         vp->file = vma->vm_file;
451         if (vp->file)
452                 vp->mapping = vma->vm_file->f_mapping;
453
454 }
455
456 /*
457  * init_vma_prep() - Initializer wrapper for vma_prepare struct
458  * @vp: The vma_prepare struct
459  * @vma: The vma that will be altered once locked
460  */
461 static inline void init_vma_prep(struct vma_prepare *vp,
462                                  struct vm_area_struct *vma)
463 {
464         init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
465 }
466
467
468 /*
469  * vma_prepare() - Helper function for handling locking VMAs prior to altering
470  * @vp: The initialized vma_prepare struct
471  */
472 static inline void vma_prepare(struct vma_prepare *vp)
473 {
474         vma_start_write(vp->vma);
475         if (vp->adj_next)
476                 vma_start_write(vp->adj_next);
477         /* vp->insert is always a newly created VMA, no need for locking */
478         if (vp->remove)
479                 vma_start_write(vp->remove);
480         if (vp->remove2)
481                 vma_start_write(vp->remove2);
482
483         if (vp->file) {
484                 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
485
486                 if (vp->adj_next)
487                         uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
488                                       vp->adj_next->vm_end);
489
490                 i_mmap_lock_write(vp->mapping);
491                 if (vp->insert && vp->insert->vm_file) {
492                         /*
493                          * Put into interval tree now, so instantiated pages
494                          * are visible to arm/parisc __flush_dcache_page
495                          * throughout; but we cannot insert into address
496                          * space until vma start or end is updated.
497                          */
498                         __vma_link_file(vp->insert,
499                                         vp->insert->vm_file->f_mapping);
500                 }
501         }
502
503         if (vp->anon_vma) {
504                 anon_vma_lock_write(vp->anon_vma);
505                 anon_vma_interval_tree_pre_update_vma(vp->vma);
506                 if (vp->adj_next)
507                         anon_vma_interval_tree_pre_update_vma(vp->adj_next);
508         }
509
510         if (vp->file) {
511                 flush_dcache_mmap_lock(vp->mapping);
512                 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
513                 if (vp->adj_next)
514                         vma_interval_tree_remove(vp->adj_next,
515                                                  &vp->mapping->i_mmap);
516         }
517
518 }
519
520 /*
521  * vma_complete- Helper function for handling the unlocking after altering VMAs,
522  * or for inserting a VMA.
523  *
524  * @vp: The vma_prepare struct
525  * @vmi: The vma iterator
526  * @mm: The mm_struct
527  */
528 static inline void vma_complete(struct vma_prepare *vp,
529                                 struct vma_iterator *vmi, struct mm_struct *mm)
530 {
531         if (vp->file) {
532                 if (vp->adj_next)
533                         vma_interval_tree_insert(vp->adj_next,
534                                                  &vp->mapping->i_mmap);
535                 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
536                 flush_dcache_mmap_unlock(vp->mapping);
537         }
538
539         if (vp->remove && vp->file) {
540                 __remove_shared_vm_struct(vp->remove, vp->file, vp->mapping);
541                 if (vp->remove2)
542                         __remove_shared_vm_struct(vp->remove2, vp->file,
543                                                   vp->mapping);
544         } else if (vp->insert) {
545                 /*
546                  * split_vma has split insert from vma, and needs
547                  * us to insert it before dropping the locks
548                  * (it may either follow vma or precede it).
549                  */
550                 vma_iter_store(vmi, vp->insert);
551                 mm->map_count++;
552         }
553
554         if (vp->anon_vma) {
555                 anon_vma_interval_tree_post_update_vma(vp->vma);
556                 if (vp->adj_next)
557                         anon_vma_interval_tree_post_update_vma(vp->adj_next);
558                 anon_vma_unlock_write(vp->anon_vma);
559         }
560
561         if (vp->file) {
562                 i_mmap_unlock_write(vp->mapping);
563                 uprobe_mmap(vp->vma);
564
565                 if (vp->adj_next)
566                         uprobe_mmap(vp->adj_next);
567         }
568
569         if (vp->remove) {
570 again:
571                 vma_mark_detached(vp->remove, true);
572                 if (vp->file) {
573                         uprobe_munmap(vp->remove, vp->remove->vm_start,
574                                       vp->remove->vm_end);
575                         fput(vp->file);
576                 }
577                 if (vp->remove->anon_vma)
578                         anon_vma_merge(vp->vma, vp->remove);
579                 mm->map_count--;
580                 mpol_put(vma_policy(vp->remove));
581                 if (!vp->remove2)
582                         WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
583                 vm_area_free(vp->remove);
584
585                 /*
586                  * In mprotect's case 6 (see comments on vma_merge),
587                  * we are removing both mid and next vmas
588                  */
589                 if (vp->remove2) {
590                         vp->remove = vp->remove2;
591                         vp->remove2 = NULL;
592                         goto again;
593                 }
594         }
595         if (vp->insert && vp->file)
596                 uprobe_mmap(vp->insert);
597         validate_mm(mm);
598 }
599
600 /*
601  * dup_anon_vma() - Helper function to duplicate anon_vma
602  * @dst: The destination VMA
603  * @src: The source VMA
604  *
605  * Returns: 0 on success.
606  */
607 static inline int dup_anon_vma(struct vm_area_struct *dst,
608                                struct vm_area_struct *src)
609 {
610         /*
611          * Easily overlooked: when mprotect shifts the boundary, make sure the
612          * expanding vma has anon_vma set if the shrinking vma had, to cover any
613          * anon pages imported.
614          */
615         if (src->anon_vma && !dst->anon_vma) {
616                 vma_start_write(dst);
617                 dst->anon_vma = src->anon_vma;
618                 return anon_vma_clone(dst, src);
619         }
620
621         return 0;
622 }
623
624 /*
625  * vma_expand - Expand an existing VMA
626  *
627  * @vmi: The vma iterator
628  * @vma: The vma to expand
629  * @start: The start of the vma
630  * @end: The exclusive end of the vma
631  * @pgoff: The page offset of vma
632  * @next: The current of next vma.
633  *
634  * Expand @vma to @start and @end.  Can expand off the start and end.  Will
635  * expand over @next if it's different from @vma and @end == @next->vm_end.
636  * Checking if the @vma can expand and merge with @next needs to be handled by
637  * the caller.
638  *
639  * Returns: 0 on success
640  */
641 int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
642                unsigned long start, unsigned long end, pgoff_t pgoff,
643                struct vm_area_struct *next)
644 {
645         bool remove_next = false;
646         struct vma_prepare vp;
647
648         if (next && (vma != next) && (end == next->vm_end)) {
649                 int ret;
650
651                 remove_next = true;
652                 ret = dup_anon_vma(vma, next);
653                 if (ret)
654                         return ret;
655         }
656
657         init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
658         /* Not merging but overwriting any part of next is not handled. */
659         VM_WARN_ON(next && !vp.remove &&
660                   next != vma && end > next->vm_start);
661         /* Only handles expanding */
662         VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
663
664         if (vma_iter_prealloc(vmi))
665                 goto nomem;
666
667         vma_prepare(&vp);
668         vma_adjust_trans_huge(vma, start, end, 0);
669         /* VMA iterator points to previous, so set to start if necessary */
670         if (vma_iter_addr(vmi) != start)
671                 vma_iter_set(vmi, start);
672
673         vma->vm_start = start;
674         vma->vm_end = end;
675         vma->vm_pgoff = pgoff;
676         /* Note: mas must be pointing to the expanding VMA */
677         vma_iter_store(vmi, vma);
678
679         vma_complete(&vp, vmi, vma->vm_mm);
680         return 0;
681
682 nomem:
683         return -ENOMEM;
684 }
685
686 /*
687  * vma_shrink() - Reduce an existing VMAs memory area
688  * @vmi: The vma iterator
689  * @vma: The VMA to modify
690  * @start: The new start
691  * @end: The new end
692  *
693  * Returns: 0 on success, -ENOMEM otherwise
694  */
695 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
696                unsigned long start, unsigned long end, pgoff_t pgoff)
697 {
698         struct vma_prepare vp;
699
700         WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
701
702         if (vma_iter_prealloc(vmi))
703                 return -ENOMEM;
704
705         init_vma_prep(&vp, vma);
706         vma_prepare(&vp);
707         vma_adjust_trans_huge(vma, start, end, 0);
708
709         if (vma->vm_start < start)
710                 vma_iter_clear(vmi, vma->vm_start, start);
711
712         if (vma->vm_end > end)
713                 vma_iter_clear(vmi, end, vma->vm_end);
714
715         vma->vm_start = start;
716         vma->vm_end = end;
717         vma->vm_pgoff = pgoff;
718         vma_complete(&vp, vmi, vma->vm_mm);
719         return 0;
720 }
721
722 /*
723  * If the vma has a ->close operation then the driver probably needs to release
724  * per-vma resources, so we don't attempt to merge those if the caller indicates
725  * the current vma may be removed as part of the merge.
726  */
727 static inline bool is_mergeable_vma(struct vm_area_struct *vma,
728                 struct file *file, unsigned long vm_flags,
729                 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
730                 struct anon_vma_name *anon_name, bool may_remove_vma)
731 {
732         /*
733          * VM_SOFTDIRTY should not prevent from VMA merging, if we
734          * match the flags but dirty bit -- the caller should mark
735          * merged VMA as dirty. If dirty bit won't be excluded from
736          * comparison, we increase pressure on the memory system forcing
737          * the kernel to generate new VMAs when old one could be
738          * extended instead.
739          */
740         if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
741                 return false;
742         if (vma->vm_file != file)
743                 return false;
744         if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
745                 return false;
746         if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
747                 return false;
748         if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
749                 return false;
750         return true;
751 }
752
753 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
754                  struct anon_vma *anon_vma2, struct vm_area_struct *vma)
755 {
756         /*
757          * The list_is_singular() test is to avoid merging VMA cloned from
758          * parents. This can improve scalability caused by anon_vma lock.
759          */
760         if ((!anon_vma1 || !anon_vma2) && (!vma ||
761                 list_is_singular(&vma->anon_vma_chain)))
762                 return true;
763         return anon_vma1 == anon_vma2;
764 }
765
766 /*
767  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
768  * in front of (at a lower virtual address and file offset than) the vma.
769  *
770  * We cannot merge two vmas if they have differently assigned (non-NULL)
771  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
772  *
773  * We don't check here for the merged mmap wrapping around the end of pagecache
774  * indices (16TB on ia32) because do_mmap() does not permit mmap's which
775  * wrap, nor mmaps which cover the final page at index -1UL.
776  *
777  * We assume the vma may be removed as part of the merge.
778  */
779 static bool
780 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
781                 struct anon_vma *anon_vma, struct file *file,
782                 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
783                 struct anon_vma_name *anon_name)
784 {
785         if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
786             is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
787                 if (vma->vm_pgoff == vm_pgoff)
788                         return true;
789         }
790         return false;
791 }
792
793 /*
794  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
795  * beyond (at a higher virtual address and file offset than) the vma.
796  *
797  * We cannot merge two vmas if they have differently assigned (non-NULL)
798  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
799  *
800  * We assume that vma is not removed as part of the merge.
801  */
802 static bool
803 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
804                 struct anon_vma *anon_vma, struct file *file,
805                 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
806                 struct anon_vma_name *anon_name)
807 {
808         if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
809             is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
810                 pgoff_t vm_pglen;
811                 vm_pglen = vma_pages(vma);
812                 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
813                         return true;
814         }
815         return false;
816 }
817
818 /*
819  * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
820  * figure out whether that can be merged with its predecessor or its
821  * successor.  Or both (it neatly fills a hole).
822  *
823  * In most cases - when called for mmap, brk or mremap - [addr,end) is
824  * certain not to be mapped by the time vma_merge is called; but when
825  * called for mprotect, it is certain to be already mapped (either at
826  * an offset within prev, or at the start of next), and the flags of
827  * this area are about to be changed to vm_flags - and the no-change
828  * case has already been eliminated.
829  *
830  * The following mprotect cases have to be considered, where **** is
831  * the area passed down from mprotect_fixup, never extending beyond one
832  * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
833  * at the same address as **** and is of the same or larger span, and
834  * NNNN the next vma after ****:
835  *
836  *     ****             ****                   ****
837  *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPCCCCCC
838  *    cannot merge    might become       might become
839  *                    PPNNNNNNNNNN       PPPPPPPPPPCC
840  *    mmap, brk or    case 4 below       case 5 below
841  *    mremap move:
842  *                        ****               ****
843  *                    PPPP    NNNN       PPPPCCCCNNNN
844  *                    might become       might become
845  *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
846  *                    PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
847  *                    PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
848  *
849  * It is important for case 8 that the vma CCCC overlapping the
850  * region **** is never going to extended over NNNN. Instead NNNN must
851  * be extended in region **** and CCCC must be removed. This way in
852  * all cases where vma_merge succeeds, the moment vma_merge drops the
853  * rmap_locks, the properties of the merged vma will be already
854  * correct for the whole merged range. Some of those properties like
855  * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
856  * be correct for the whole merged range immediately after the
857  * rmap_locks are released. Otherwise if NNNN would be removed and
858  * CCCC would be extended over the NNNN range, remove_migration_ptes
859  * or other rmap walkers (if working on addresses beyond the "end"
860  * parameter) may establish ptes with the wrong permissions of CCCC
861  * instead of the right permissions of NNNN.
862  *
863  * In the code below:
864  * PPPP is represented by *prev
865  * CCCC is represented by *curr or not represented at all (NULL)
866  * NNNN is represented by *next or not represented at all (NULL)
867  * **** is not represented - it will be merged and the vma containing the
868  *      area is returned, or the function will return NULL
869  */
870 struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
871                         struct vm_area_struct *prev, unsigned long addr,
872                         unsigned long end, unsigned long vm_flags,
873                         struct anon_vma *anon_vma, struct file *file,
874                         pgoff_t pgoff, struct mempolicy *policy,
875                         struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
876                         struct anon_vma_name *anon_name)
877 {
878         struct vm_area_struct *curr, *next, *res;
879         struct vm_area_struct *vma, *adjust, *remove, *remove2;
880         struct vma_prepare vp;
881         pgoff_t vma_pgoff;
882         int err = 0;
883         bool merge_prev = false;
884         bool merge_next = false;
885         bool vma_expanded = false;
886         unsigned long vma_start = addr;
887         unsigned long vma_end = end;
888         pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
889         long adj_start = 0;
890
891         /*
892          * We later require that vma->vm_flags == vm_flags,
893          * so this tests vma->vm_flags & VM_SPECIAL, too.
894          */
895         if (vm_flags & VM_SPECIAL)
896                 return NULL;
897
898         /* Does the input range span an existing VMA? (cases 5 - 8) */
899         curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
900
901         if (!curr ||                    /* cases 1 - 4 */
902             end == curr->vm_end)        /* cases 6 - 8, adjacent VMA */
903                 next = vma_lookup(mm, end);
904         else
905                 next = NULL;            /* case 5 */
906
907         if (prev) {
908                 vma_start = prev->vm_start;
909                 vma_pgoff = prev->vm_pgoff;
910
911                 /* Can we merge the predecessor? */
912                 if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
913                     && can_vma_merge_after(prev, vm_flags, anon_vma, file,
914                                            pgoff, vm_userfaultfd_ctx, anon_name)) {
915                         merge_prev = true;
916                         vma_prev(vmi);
917                 }
918         }
919
920         /* Can we merge the successor? */
921         if (next && mpol_equal(policy, vma_policy(next)) &&
922             can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
923                                  vm_userfaultfd_ctx, anon_name)) {
924                 merge_next = true;
925         }
926
927         /* Verify some invariant that must be enforced by the caller. */
928         VM_WARN_ON(prev && addr <= prev->vm_start);
929         VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
930         VM_WARN_ON(addr >= end);
931
932         if (!merge_prev && !merge_next)
933                 return NULL; /* Not mergeable. */
934
935         res = vma = prev;
936         remove = remove2 = adjust = NULL;
937
938         /* Can we merge both the predecessor and the successor? */
939         if (merge_prev && merge_next &&
940             is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
941                 remove = next;                          /* case 1 */
942                 vma_end = next->vm_end;
943                 err = dup_anon_vma(prev, next);
944                 if (curr) {                             /* case 6 */
945                         remove = curr;
946                         remove2 = next;
947                         if (!next->anon_vma)
948                                 err = dup_anon_vma(prev, curr);
949                 }
950         } else if (merge_prev) {                        /* case 2 */
951                 if (curr) {
952                         err = dup_anon_vma(prev, curr);
953                         if (end == curr->vm_end) {      /* case 7 */
954                                 remove = curr;
955                         } else {                        /* case 5 */
956                                 adjust = curr;
957                                 adj_start = (end - curr->vm_start);
958                         }
959                 }
960         } else { /* merge_next */
961                 res = next;
962                 if (prev && addr < prev->vm_end) {      /* case 4 */
963                         vma_end = addr;
964                         adjust = next;
965                         adj_start = -(prev->vm_end - addr);
966                         err = dup_anon_vma(next, prev);
967                 } else {
968                         /*
969                          * Note that cases 3 and 8 are the ONLY ones where prev
970                          * is permitted to be (but is not necessarily) NULL.
971                          */
972                         vma = next;                     /* case 3 */
973                         vma_start = addr;
974                         vma_end = next->vm_end;
975                         vma_pgoff = next->vm_pgoff - pglen;
976                         if (curr) {                     /* case 8 */
977                                 vma_pgoff = curr->vm_pgoff;
978                                 remove = curr;
979                                 err = dup_anon_vma(next, curr);
980                         }
981                 }
982         }
983
984         /* Error in anon_vma clone. */
985         if (err)
986                 return NULL;
987
988         if (vma_iter_prealloc(vmi))
989                 return NULL;
990
991         init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
992         VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
993                    vp.anon_vma != adjust->anon_vma);
994
995         vma_prepare(&vp);
996         vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
997         if (vma_start < vma->vm_start || vma_end > vma->vm_end)
998                 vma_expanded = true;
999
1000         vma->vm_start = vma_start;
1001         vma->vm_end = vma_end;
1002         vma->vm_pgoff = vma_pgoff;
1003
1004         if (vma_expanded)
1005                 vma_iter_store(vmi, vma);
1006
1007         if (adj_start) {
1008                 adjust->vm_start += adj_start;
1009                 adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
1010                 if (adj_start < 0) {
1011                         WARN_ON(vma_expanded);
1012                         vma_iter_store(vmi, next);
1013                 }
1014         }
1015
1016         vma_complete(&vp, vmi, mm);
1017         khugepaged_enter_vma(res, vm_flags);
1018         return res;
1019 }
1020
1021 /*
1022  * Rough compatibility check to quickly see if it's even worth looking
1023  * at sharing an anon_vma.
1024  *
1025  * They need to have the same vm_file, and the flags can only differ
1026  * in things that mprotect may change.
1027  *
1028  * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1029  * we can merge the two vma's. For example, we refuse to merge a vma if
1030  * there is a vm_ops->close() function, because that indicates that the
1031  * driver is doing some kind of reference counting. But that doesn't
1032  * really matter for the anon_vma sharing case.
1033  */
1034 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1035 {
1036         return a->vm_end == b->vm_start &&
1037                 mpol_equal(vma_policy(a), vma_policy(b)) &&
1038                 a->vm_file == b->vm_file &&
1039                 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1040                 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1041 }
1042
1043 /*
1044  * Do some basic sanity checking to see if we can re-use the anon_vma
1045  * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1046  * the same as 'old', the other will be the new one that is trying
1047  * to share the anon_vma.
1048  *
1049  * NOTE! This runs with mmap_lock held for reading, so it is possible that
1050  * the anon_vma of 'old' is concurrently in the process of being set up
1051  * by another page fault trying to merge _that_. But that's ok: if it
1052  * is being set up, that automatically means that it will be a singleton
1053  * acceptable for merging, so we can do all of this optimistically. But
1054  * we do that READ_ONCE() to make sure that we never re-load the pointer.
1055  *
1056  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1057  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1058  * is to return an anon_vma that is "complex" due to having gone through
1059  * a fork).
1060  *
1061  * We also make sure that the two vma's are compatible (adjacent,
1062  * and with the same memory policies). That's all stable, even with just
1063  * a read lock on the mmap_lock.
1064  */
1065 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1066 {
1067         if (anon_vma_compatible(a, b)) {
1068                 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1069
1070                 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1071                         return anon_vma;
1072         }
1073         return NULL;
1074 }
1075
1076 /*
1077  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1078  * neighbouring vmas for a suitable anon_vma, before it goes off
1079  * to allocate a new anon_vma.  It checks because a repetitive
1080  * sequence of mprotects and faults may otherwise lead to distinct
1081  * anon_vmas being allocated, preventing vma merge in subsequent
1082  * mprotect.
1083  */
1084 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1085 {
1086         MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
1087         struct anon_vma *anon_vma = NULL;
1088         struct vm_area_struct *prev, *next;
1089
1090         /* Try next first. */
1091         next = mas_walk(&mas);
1092         if (next) {
1093                 anon_vma = reusable_anon_vma(next, vma, next);
1094                 if (anon_vma)
1095                         return anon_vma;
1096         }
1097
1098         prev = mas_prev(&mas, 0);
1099         VM_BUG_ON_VMA(prev != vma, vma);
1100         prev = mas_prev(&mas, 0);
1101         /* Try prev next. */
1102         if (prev)
1103                 anon_vma = reusable_anon_vma(prev, prev, vma);
1104
1105         /*
1106          * We might reach here with anon_vma == NULL if we can't find
1107          * any reusable anon_vma.
1108          * There's no absolute need to look only at touching neighbours:
1109          * we could search further afield for "compatible" anon_vmas.
1110          * But it would probably just be a waste of time searching,
1111          * or lead to too many vmas hanging off the same anon_vma.
1112          * We're trying to allow mprotect remerging later on,
1113          * not trying to minimize memory used for anon_vmas.
1114          */
1115         return anon_vma;
1116 }
1117
1118 /*
1119  * If a hint addr is less than mmap_min_addr change hint to be as
1120  * low as possible but still greater than mmap_min_addr
1121  */
1122 static inline unsigned long round_hint_to_min(unsigned long hint)
1123 {
1124         hint &= PAGE_MASK;
1125         if (((void *)hint != NULL) &&
1126             (hint < mmap_min_addr))
1127                 return PAGE_ALIGN(mmap_min_addr);
1128         return hint;
1129 }
1130
1131 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
1132                         unsigned long bytes)
1133 {
1134         unsigned long locked_pages, limit_pages;
1135
1136         if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
1137                 return true;
1138
1139         locked_pages = bytes >> PAGE_SHIFT;
1140         locked_pages += mm->locked_vm;
1141
1142         limit_pages = rlimit(RLIMIT_MEMLOCK);
1143         limit_pages >>= PAGE_SHIFT;
1144
1145         return locked_pages <= limit_pages;
1146 }
1147
1148 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1149 {
1150         if (S_ISREG(inode->i_mode))
1151                 return MAX_LFS_FILESIZE;
1152
1153         if (S_ISBLK(inode->i_mode))
1154                 return MAX_LFS_FILESIZE;
1155
1156         if (S_ISSOCK(inode->i_mode))
1157                 return MAX_LFS_FILESIZE;
1158
1159         /* Special "we do even unsigned file positions" case */
1160         if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1161                 return 0;
1162
1163         /* Yes, random drivers might want more. But I'm tired of buggy drivers */
1164         return ULONG_MAX;
1165 }
1166
1167 static inline bool file_mmap_ok(struct file *file, struct inode *inode,
1168                                 unsigned long pgoff, unsigned long len)
1169 {
1170         u64 maxsize = file_mmap_size_max(file, inode);
1171
1172         if (maxsize && len > maxsize)
1173                 return false;
1174         maxsize -= len;
1175         if (pgoff > maxsize >> PAGE_SHIFT)
1176                 return false;
1177         return true;
1178 }
1179
1180 /*
1181  * The caller must write-lock current->mm->mmap_lock.
1182  */
1183 unsigned long do_mmap(struct file *file, unsigned long addr,
1184                         unsigned long len, unsigned long prot,
1185                         unsigned long flags, unsigned long pgoff,
1186                         unsigned long *populate, struct list_head *uf)
1187 {
1188         struct mm_struct *mm = current->mm;
1189         vm_flags_t vm_flags;
1190         int pkey = 0;
1191
1192         *populate = 0;
1193
1194         if (!len)
1195                 return -EINVAL;
1196
1197         /*
1198          * Does the application expect PROT_READ to imply PROT_EXEC?
1199          *
1200          * (the exception is when the underlying filesystem is noexec
1201          *  mounted, in which case we dont add PROT_EXEC.)
1202          */
1203         if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1204                 if (!(file && path_noexec(&file->f_path)))
1205                         prot |= PROT_EXEC;
1206
1207         /* force arch specific MAP_FIXED handling in get_unmapped_area */
1208         if (flags & MAP_FIXED_NOREPLACE)
1209                 flags |= MAP_FIXED;
1210
1211         if (!(flags & MAP_FIXED))
1212                 addr = round_hint_to_min(addr);
1213
1214         /* Careful about overflows.. */
1215         len = PAGE_ALIGN(len);
1216         if (!len)
1217                 return -ENOMEM;
1218
1219         /* offset overflow? */
1220         if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1221                 return -EOVERFLOW;
1222
1223         /* Too many mappings? */
1224         if (mm->map_count > sysctl_max_map_count)
1225                 return -ENOMEM;
1226
1227         /* Obtain the address to map to. we verify (or select) it and ensure
1228          * that it represents a valid section of the address space.
1229          */
1230         addr = get_unmapped_area(file, addr, len, pgoff, flags);
1231         if (IS_ERR_VALUE(addr))
1232                 return addr;
1233
1234         if (flags & MAP_FIXED_NOREPLACE) {
1235                 if (find_vma_intersection(mm, addr, addr + len))
1236                         return -EEXIST;
1237         }
1238
1239         if (prot == PROT_EXEC) {
1240                 pkey = execute_only_pkey(mm);
1241                 if (pkey < 0)
1242                         pkey = 0;
1243         }
1244
1245         /* Do simple checking here so the lower-level routines won't have
1246          * to. we assume access permissions have been handled by the open
1247          * of the memory object, so we don't do any here.
1248          */
1249         vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
1250                         mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1251
1252         if (flags & MAP_LOCKED)
1253                 if (!can_do_mlock())
1254                         return -EPERM;
1255
1256         if (!mlock_future_ok(mm, vm_flags, len))
1257                 return -EAGAIN;
1258
1259         if (file) {
1260                 struct inode *inode = file_inode(file);
1261                 unsigned long flags_mask;
1262
1263                 if (!file_mmap_ok(file, inode, pgoff, len))
1264                         return -EOVERFLOW;
1265
1266                 flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
1267
1268                 switch (flags & MAP_TYPE) {
1269                 case MAP_SHARED:
1270                         /*
1271                          * Force use of MAP_SHARED_VALIDATE with non-legacy
1272                          * flags. E.g. MAP_SYNC is dangerous to use with
1273                          * MAP_SHARED as you don't know which consistency model
1274                          * you will get. We silently ignore unsupported flags
1275                          * with MAP_SHARED to preserve backward compatibility.
1276                          */
1277                         flags &= LEGACY_MAP_MASK;
1278                         fallthrough;
1279                 case MAP_SHARED_VALIDATE:
1280                         if (flags & ~flags_mask)
1281                                 return -EOPNOTSUPP;
1282                         if (prot & PROT_WRITE) {
1283                                 if (!(file->f_mode & FMODE_WRITE))
1284                                         return -EACCES;
1285                                 if (IS_SWAPFILE(file->f_mapping->host))
1286                                         return -ETXTBSY;
1287                         }
1288
1289                         /*
1290                          * Make sure we don't allow writing to an append-only
1291                          * file..
1292                          */
1293                         if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1294                                 return -EACCES;
1295
1296                         vm_flags |= VM_SHARED | VM_MAYSHARE;
1297                         if (!(file->f_mode & FMODE_WRITE))
1298                                 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1299                         fallthrough;
1300                 case MAP_PRIVATE:
1301                         if (!(file->f_mode & FMODE_READ))
1302                                 return -EACCES;
1303                         if (path_noexec(&file->f_path)) {
1304                                 if (vm_flags & VM_EXEC)
1305                                         return -EPERM;
1306                                 vm_flags &= ~VM_MAYEXEC;
1307                         }
1308
1309                         if (!file->f_op->mmap)
1310                                 return -ENODEV;
1311                         if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1312                                 return -EINVAL;
1313                         break;
1314
1315                 default:
1316                         return -EINVAL;
1317                 }
1318         } else {
1319                 switch (flags & MAP_TYPE) {
1320                 case MAP_SHARED:
1321                         if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1322                                 return -EINVAL;
1323                         /*
1324                          * Ignore pgoff.
1325                          */
1326                         pgoff = 0;
1327                         vm_flags |= VM_SHARED | VM_MAYSHARE;
1328                         break;
1329                 case MAP_PRIVATE:
1330                         /*
1331                          * Set pgoff according to addr for anon_vma.
1332                          */
1333                         pgoff = addr >> PAGE_SHIFT;
1334                         break;
1335                 default:
1336                         return -EINVAL;
1337                 }
1338         }
1339
1340         /*
1341          * Set 'VM_NORESERVE' if we should not account for the
1342          * memory use of this mapping.
1343          */
1344         if (flags & MAP_NORESERVE) {
1345                 /* We honor MAP_NORESERVE if allowed to overcommit */
1346                 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1347                         vm_flags |= VM_NORESERVE;
1348
1349                 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1350                 if (file && is_file_hugepages(file))
1351                         vm_flags |= VM_NORESERVE;
1352         }
1353
1354         addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
1355         if (!IS_ERR_VALUE(addr) &&
1356             ((vm_flags & VM_LOCKED) ||
1357              (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1358                 *populate = len;
1359         return addr;
1360 }
1361
1362 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1363                               unsigned long prot, unsigned long flags,
1364                               unsigned long fd, unsigned long pgoff)
1365 {
1366         struct file *file = NULL;
1367         unsigned long retval;
1368
1369         if (!(flags & MAP_ANONYMOUS)) {
1370                 audit_mmap_fd(fd, flags);
1371                 file = fget(fd);
1372                 if (!file)
1373                         return -EBADF;
1374                 if (is_file_hugepages(file)) {
1375                         len = ALIGN(len, huge_page_size(hstate_file(file)));
1376                 } else if (unlikely(flags & MAP_HUGETLB)) {
1377                         retval = -EINVAL;
1378                         goto out_fput;
1379                 }
1380         } else if (flags & MAP_HUGETLB) {
1381                 struct hstate *hs;
1382
1383                 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1384                 if (!hs)
1385                         return -EINVAL;
1386
1387                 len = ALIGN(len, huge_page_size(hs));
1388                 /*
1389                  * VM_NORESERVE is used because the reservations will be
1390                  * taken when vm_ops->mmap() is called
1391                  */
1392                 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1393                                 VM_NORESERVE,
1394                                 HUGETLB_ANONHUGE_INODE,
1395                                 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1396                 if (IS_ERR(file))
1397                         return PTR_ERR(file);
1398         }
1399
1400         retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1401 out_fput:
1402         if (file)
1403                 fput(file);
1404         return retval;
1405 }
1406
1407 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1408                 unsigned long, prot, unsigned long, flags,
1409                 unsigned long, fd, unsigned long, pgoff)
1410 {
1411         return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1412 }
1413
1414 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1415 struct mmap_arg_struct {
1416         unsigned long addr;
1417         unsigned long len;
1418         unsigned long prot;
1419         unsigned long flags;
1420         unsigned long fd;
1421         unsigned long offset;
1422 };
1423
1424 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1425 {
1426         struct mmap_arg_struct a;
1427
1428         if (copy_from_user(&a, arg, sizeof(a)))
1429                 return -EFAULT;
1430         if (offset_in_page(a.offset))
1431                 return -EINVAL;
1432
1433         return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1434                                a.offset >> PAGE_SHIFT);
1435 }
1436 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1437
1438 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1439 {
1440         return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1441 }
1442
1443 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1444 {
1445         return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1446                 (VM_WRITE | VM_SHARED);
1447 }
1448
1449 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1450 {
1451         /* No managed pages to writeback. */
1452         if (vma->vm_flags & VM_PFNMAP)
1453                 return false;
1454
1455         return vma->vm_file && vma->vm_file->f_mapping &&
1456                 mapping_can_writeback(vma->vm_file->f_mapping);
1457 }
1458
1459 /*
1460  * Does this VMA require the underlying folios to have their dirty state
1461  * tracked?
1462  */
1463 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1464 {
1465         /* Only shared, writable VMAs require dirty tracking. */
1466         if (!vma_is_shared_writable(vma))
1467                 return false;
1468
1469         /* Does the filesystem need to be notified? */
1470         if (vm_ops_needs_writenotify(vma->vm_ops))
1471                 return true;
1472
1473         /*
1474          * Even if the filesystem doesn't indicate a need for writenotify, if it
1475          * can writeback, dirty tracking is still required.
1476          */
1477         return vma_fs_can_writeback(vma);
1478 }
1479
1480 /*
1481  * Some shared mappings will want the pages marked read-only
1482  * to track write events. If so, we'll downgrade vm_page_prot
1483  * to the private version (using protection_map[] without the
1484  * VM_SHARED bit).
1485  */
1486 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1487 {
1488         /* If it was private or non-writable, the write bit is already clear */
1489         if (!vma_is_shared_writable(vma))
1490                 return 0;
1491
1492         /* The backer wishes to know when pages are first written to? */
1493         if (vm_ops_needs_writenotify(vma->vm_ops))
1494                 return 1;
1495
1496         /* The open routine did something to the protections that pgprot_modify
1497          * won't preserve? */
1498         if (pgprot_val(vm_page_prot) !=
1499             pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1500                 return 0;
1501
1502         /*
1503          * Do we need to track softdirty? hugetlb does not support softdirty
1504          * tracking yet.
1505          */
1506         if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1507                 return 1;
1508
1509         /* Do we need write faults for uffd-wp tracking? */
1510         if (userfaultfd_wp(vma))
1511                 return 1;
1512
1513         /* Can the mapping track the dirty pages? */
1514         return vma_fs_can_writeback(vma);
1515 }
1516
1517 /*
1518  * We account for memory if it's a private writeable mapping,
1519  * not hugepages and VM_NORESERVE wasn't set.
1520  */
1521 static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1522 {
1523         /*
1524          * hugetlb has its own accounting separate from the core VM
1525          * VM_HUGETLB may not be set yet so we cannot check for that flag.
1526          */
1527         if (file && is_file_hugepages(file))
1528                 return 0;
1529
1530         return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1531 }
1532
1533 /**
1534  * unmapped_area() - Find an area between the low_limit and the high_limit with
1535  * the correct alignment and offset, all from @info. Note: current->mm is used
1536  * for the search.
1537  *
1538  * @info: The unmapped area information including the range [low_limit -
1539  * high_limit), the alignment offset and mask.
1540  *
1541  * Return: A memory address or -ENOMEM.
1542  */
1543 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1544 {
1545         unsigned long length, gap;
1546         unsigned long low_limit, high_limit;
1547         struct vm_area_struct *tmp;
1548
1549         MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1550
1551         /* Adjust search length to account for worst case alignment overhead */
1552         length = info->length + info->align_mask;
1553         if (length < info->length)
1554                 return -ENOMEM;
1555
1556         low_limit = info->low_limit;
1557         if (low_limit < mmap_min_addr)
1558                 low_limit = mmap_min_addr;
1559         high_limit = info->high_limit;
1560 retry:
1561         if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
1562                 return -ENOMEM;
1563
1564         gap = mas.index;
1565         gap += (info->align_offset - gap) & info->align_mask;
1566         tmp = mas_next(&mas, ULONG_MAX);
1567         if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
1568                 if (vm_start_gap(tmp) < gap + length - 1) {
1569                         low_limit = tmp->vm_end;
1570                         mas_reset(&mas);
1571                         goto retry;
1572                 }
1573         } else {
1574                 tmp = mas_prev(&mas, 0);
1575                 if (tmp && vm_end_gap(tmp) > gap) {
1576                         low_limit = vm_end_gap(tmp);
1577                         mas_reset(&mas);
1578                         goto retry;
1579                 }
1580         }
1581
1582         return gap;
1583 }
1584
1585 /**
1586  * unmapped_area_topdown() - Find an area between the low_limit and the
1587  * high_limit with the correct alignment and offset at the highest available
1588  * address, all from @info. Note: current->mm is used for the search.
1589  *
1590  * @info: The unmapped area information including the range [low_limit -
1591  * high_limit), the alignment offset and mask.
1592  *
1593  * Return: A memory address or -ENOMEM.
1594  */
1595 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1596 {
1597         unsigned long length, gap, gap_end;
1598         unsigned long low_limit, high_limit;
1599         struct vm_area_struct *tmp;
1600
1601         MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1602         /* Adjust search length to account for worst case alignment overhead */
1603         length = info->length + info->align_mask;
1604         if (length < info->length)
1605                 return -ENOMEM;
1606
1607         low_limit = info->low_limit;
1608         if (low_limit < mmap_min_addr)
1609                 low_limit = mmap_min_addr;
1610         high_limit = info->high_limit;
1611 retry:
1612         if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
1613                 return -ENOMEM;
1614
1615         gap = mas.last + 1 - info->length;
1616         gap -= (gap - info->align_offset) & info->align_mask;
1617         gap_end = mas.last;
1618         tmp = mas_next(&mas, ULONG_MAX);
1619         if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
1620                 if (vm_start_gap(tmp) <= gap_end) {
1621                         high_limit = vm_start_gap(tmp);
1622                         mas_reset(&mas);
1623                         goto retry;
1624                 }
1625         } else {
1626                 tmp = mas_prev(&mas, 0);
1627                 if (tmp && vm_end_gap(tmp) > gap) {
1628                         high_limit = tmp->vm_start;
1629                         mas_reset(&mas);
1630                         goto retry;
1631                 }
1632         }
1633
1634         return gap;
1635 }
1636
1637 /*
1638  * Search for an unmapped address range.
1639  *
1640  * We are looking for a range that:
1641  * - does not intersect with any VMA;
1642  * - is contained within the [low_limit, high_limit) interval;
1643  * - is at least the desired size.
1644  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
1645  */
1646 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
1647 {
1648         unsigned long addr;
1649
1650         if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
1651                 addr = unmapped_area_topdown(info);
1652         else
1653                 addr = unmapped_area(info);
1654
1655         trace_vm_unmapped_area(addr, info);
1656         return addr;
1657 }
1658
1659 /* Get an address range which is currently unmapped.
1660  * For shmat() with addr=0.
1661  *
1662  * Ugly calling convention alert:
1663  * Return value with the low bits set means error value,
1664  * ie
1665  *      if (ret & ~PAGE_MASK)
1666  *              error = ret;
1667  *
1668  * This function "knows" that -ENOMEM has the bits set.
1669  */
1670 unsigned long
1671 generic_get_unmapped_area(struct file *filp, unsigned long addr,
1672                           unsigned long len, unsigned long pgoff,
1673                           unsigned long flags)
1674 {
1675         struct mm_struct *mm = current->mm;
1676         struct vm_area_struct *vma, *prev;
1677         struct vm_unmapped_area_info info;
1678         const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1679
1680         if (len > mmap_end - mmap_min_addr)
1681                 return -ENOMEM;
1682
1683         if (flags & MAP_FIXED)
1684                 return addr;
1685
1686         if (addr) {
1687                 addr = PAGE_ALIGN(addr);
1688                 vma = find_vma_prev(mm, addr, &prev);
1689                 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1690                     (!vma || addr + len <= vm_start_gap(vma)) &&
1691                     (!prev || addr >= vm_end_gap(prev)))
1692                         return addr;
1693         }
1694
1695         info.flags = 0;
1696         info.length = len;
1697         info.low_limit = mm->mmap_base;
1698         info.high_limit = mmap_end;
1699         info.align_mask = 0;
1700         info.align_offset = 0;
1701         return vm_unmapped_area(&info);
1702 }
1703
1704 #ifndef HAVE_ARCH_UNMAPPED_AREA
1705 unsigned long
1706 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1707                        unsigned long len, unsigned long pgoff,
1708                        unsigned long flags)
1709 {
1710         return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
1711 }
1712 #endif
1713
1714 /*
1715  * This mmap-allocator allocates new areas top-down from below the
1716  * stack's low limit (the base):
1717  */
1718 unsigned long
1719 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1720                                   unsigned long len, unsigned long pgoff,
1721                                   unsigned long flags)
1722 {
1723         struct vm_area_struct *vma, *prev;
1724         struct mm_struct *mm = current->mm;
1725         struct vm_unmapped_area_info info;
1726         const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1727
1728         /* requested length too big for entire address space */
1729         if (len > mmap_end - mmap_min_addr)
1730                 return -ENOMEM;
1731
1732         if (flags & MAP_FIXED)
1733                 return addr;
1734
1735         /* requesting a specific address */
1736         if (addr) {
1737                 addr = PAGE_ALIGN(addr);
1738                 vma = find_vma_prev(mm, addr, &prev);
1739                 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1740                                 (!vma || addr + len <= vm_start_gap(vma)) &&
1741                                 (!prev || addr >= vm_end_gap(prev)))
1742                         return addr;
1743         }
1744
1745         info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1746         info.length = len;
1747         info.low_limit = PAGE_SIZE;
1748         info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
1749         info.align_mask = 0;
1750         info.align_offset = 0;
1751         addr = vm_unmapped_area(&info);
1752
1753         /*
1754          * A failed mmap() very likely causes application failure,
1755          * so fall back to the bottom-up function here. This scenario
1756          * can happen with large stack limits and large mmap()
1757          * allocations.
1758          */
1759         if (offset_in_page(addr)) {
1760                 VM_BUG_ON(addr != -ENOMEM);
1761                 info.flags = 0;
1762                 info.low_limit = TASK_UNMAPPED_BASE;
1763                 info.high_limit = mmap_end;
1764                 addr = vm_unmapped_area(&info);
1765         }
1766
1767         return addr;
1768 }
1769
1770 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1771 unsigned long
1772 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1773                                unsigned long len, unsigned long pgoff,
1774                                unsigned long flags)
1775 {
1776         return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
1777 }
1778 #endif
1779
1780 unsigned long
1781 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1782                 unsigned long pgoff, unsigned long flags)
1783 {
1784         unsigned long (*get_area)(struct file *, unsigned long,
1785                                   unsigned long, unsigned long, unsigned long);
1786
1787         unsigned long error = arch_mmap_check(addr, len, flags);
1788         if (error)
1789                 return error;
1790
1791         /* Careful about overflows.. */
1792         if (len > TASK_SIZE)
1793                 return -ENOMEM;
1794
1795         get_area = current->mm->get_unmapped_area;
1796         if (file) {
1797                 if (file->f_op->get_unmapped_area)
1798                         get_area = file->f_op->get_unmapped_area;
1799         } else if (flags & MAP_SHARED) {
1800                 /*
1801                  * mmap_region() will call shmem_zero_setup() to create a file,
1802                  * so use shmem's get_unmapped_area in case it can be huge.
1803                  * do_mmap() will clear pgoff, so match alignment.
1804                  */
1805                 pgoff = 0;
1806                 get_area = shmem_get_unmapped_area;
1807         }
1808
1809         addr = get_area(file, addr, len, pgoff, flags);
1810         if (IS_ERR_VALUE(addr))
1811                 return addr;
1812
1813         if (addr > TASK_SIZE - len)
1814                 return -ENOMEM;
1815         if (offset_in_page(addr))
1816                 return -EINVAL;
1817
1818         error = security_mmap_addr(addr);
1819         return error ? error : addr;
1820 }
1821
1822 EXPORT_SYMBOL(get_unmapped_area);
1823
1824 /**
1825  * find_vma_intersection() - Look up the first VMA which intersects the interval
1826  * @mm: The process address space.
1827  * @start_addr: The inclusive start user address.
1828  * @end_addr: The exclusive end user address.
1829  *
1830  * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
1831  * start_addr < end_addr.
1832  */
1833 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
1834                                              unsigned long start_addr,
1835                                              unsigned long end_addr)
1836 {
1837         unsigned long index = start_addr;
1838
1839         mmap_assert_locked(mm);
1840         return mt_find(&mm->mm_mt, &index, end_addr - 1);
1841 }
1842 EXPORT_SYMBOL(find_vma_intersection);
1843
1844 /**
1845  * find_vma() - Find the VMA for a given address, or the next VMA.
1846  * @mm: The mm_struct to check
1847  * @addr: The address
1848  *
1849  * Returns: The VMA associated with addr, or the next VMA.
1850  * May return %NULL in the case of no VMA at addr or above.
1851  */
1852 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1853 {
1854         unsigned long index = addr;
1855
1856         mmap_assert_locked(mm);
1857         return mt_find(&mm->mm_mt, &index, ULONG_MAX);
1858 }
1859 EXPORT_SYMBOL(find_vma);
1860
1861 /**
1862  * find_vma_prev() - Find the VMA for a given address, or the next vma and
1863  * set %pprev to the previous VMA, if any.
1864  * @mm: The mm_struct to check
1865  * @addr: The address
1866  * @pprev: The pointer to set to the previous VMA
1867  *
1868  * Note that RCU lock is missing here since the external mmap_lock() is used
1869  * instead.
1870  *
1871  * Returns: The VMA associated with @addr, or the next vma.
1872  * May return %NULL in the case of no vma at addr or above.
1873  */
1874 struct vm_area_struct *
1875 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1876                         struct vm_area_struct **pprev)
1877 {
1878         struct vm_area_struct *vma;
1879         MA_STATE(mas, &mm->mm_mt, addr, addr);
1880
1881         vma = mas_walk(&mas);
1882         *pprev = mas_prev(&mas, 0);
1883         if (!vma)
1884                 vma = mas_next(&mas, ULONG_MAX);
1885         return vma;
1886 }
1887
1888 /*
1889  * Verify that the stack growth is acceptable and
1890  * update accounting. This is shared with both the
1891  * grow-up and grow-down cases.
1892  */
1893 static int acct_stack_growth(struct vm_area_struct *vma,
1894                              unsigned long size, unsigned long grow)
1895 {
1896         struct mm_struct *mm = vma->vm_mm;
1897         unsigned long new_start;
1898
1899         /* address space limit tests */
1900         if (!may_expand_vm(mm, vma->vm_flags, grow))
1901                 return -ENOMEM;
1902
1903         /* Stack limit test */
1904         if (size > rlimit(RLIMIT_STACK))
1905                 return -ENOMEM;
1906
1907         /* mlock limit tests */
1908         if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1909                 return -ENOMEM;
1910
1911         /* Check to ensure the stack will not grow into a hugetlb-only region */
1912         new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1913                         vma->vm_end - size;
1914         if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1915                 return -EFAULT;
1916
1917         /*
1918          * Overcommit..  This must be the final test, as it will
1919          * update security statistics.
1920          */
1921         if (security_vm_enough_memory_mm(mm, grow))
1922                 return -ENOMEM;
1923
1924         return 0;
1925 }
1926
1927 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1928 /*
1929  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1930  * vma is the last one with address > vma->vm_end.  Have to extend vma.
1931  */
1932 static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1933 {
1934         struct mm_struct *mm = vma->vm_mm;
1935         struct vm_area_struct *next;
1936         unsigned long gap_addr;
1937         int error = 0;
1938         MA_STATE(mas, &mm->mm_mt, 0, 0);
1939
1940         if (!(vma->vm_flags & VM_GROWSUP))
1941                 return -EFAULT;
1942
1943         /* Guard against exceeding limits of the address space. */
1944         address &= PAGE_MASK;
1945         if (address >= (TASK_SIZE & PAGE_MASK))
1946                 return -ENOMEM;
1947         address += PAGE_SIZE;
1948
1949         /* Enforce stack_guard_gap */
1950         gap_addr = address + stack_guard_gap;
1951
1952         /* Guard against overflow */
1953         if (gap_addr < address || gap_addr > TASK_SIZE)
1954                 gap_addr = TASK_SIZE;
1955
1956         next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1957         if (next && vma_is_accessible(next)) {
1958                 if (!(next->vm_flags & VM_GROWSUP))
1959                         return -ENOMEM;
1960                 /* Check that both stack segments have the same anon_vma? */
1961         }
1962
1963         if (mas_preallocate(&mas, GFP_KERNEL))
1964                 return -ENOMEM;
1965
1966         /* We must make sure the anon_vma is allocated. */
1967         if (unlikely(anon_vma_prepare(vma))) {
1968                 mas_destroy(&mas);
1969                 return -ENOMEM;
1970         }
1971
1972         /* Lock the VMA before expanding to prevent concurrent page faults */
1973         vma_start_write(vma);
1974         /*
1975          * vma->vm_start/vm_end cannot change under us because the caller
1976          * is required to hold the mmap_lock in read mode.  We need the
1977          * anon_vma lock to serialize against concurrent expand_stacks.
1978          */
1979         anon_vma_lock_write(vma->anon_vma);
1980
1981         /* Somebody else might have raced and expanded it already */
1982         if (address > vma->vm_end) {
1983                 unsigned long size, grow;
1984
1985                 size = address - vma->vm_start;
1986                 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1987
1988                 error = -ENOMEM;
1989                 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1990                         error = acct_stack_growth(vma, size, grow);
1991                         if (!error) {
1992                                 /*
1993                                  * We only hold a shared mmap_lock lock here, so
1994                                  * we need to protect against concurrent vma
1995                                  * expansions.  anon_vma_lock_write() doesn't
1996                                  * help here, as we don't guarantee that all
1997                                  * growable vmas in a mm share the same root
1998                                  * anon vma.  So, we reuse mm->page_table_lock
1999                                  * to guard against concurrent vma expansions.
2000                                  */
2001                                 spin_lock(&mm->page_table_lock);
2002                                 if (vma->vm_flags & VM_LOCKED)
2003                                         mm->locked_vm += grow;
2004                                 vm_stat_account(mm, vma->vm_flags, grow);
2005                                 anon_vma_interval_tree_pre_update_vma(vma);
2006                                 vma->vm_end = address;
2007                                 /* Overwrite old entry in mtree. */
2008                                 mas_set_range(&mas, vma->vm_start, address - 1);
2009                                 mas_store_prealloc(&mas, vma);
2010                                 anon_vma_interval_tree_post_update_vma(vma);
2011                                 spin_unlock(&mm->page_table_lock);
2012
2013                                 perf_event_mmap(vma);
2014                         }
2015                 }
2016         }
2017         anon_vma_unlock_write(vma->anon_vma);
2018         khugepaged_enter_vma(vma, vma->vm_flags);
2019         mas_destroy(&mas);
2020         validate_mm(mm);
2021         return error;
2022 }
2023 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2024
2025 /*
2026  * vma is the first one with address < vma->vm_start.  Have to extend vma.
2027  * mmap_lock held for writing.
2028  */
2029 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
2030 {
2031         struct mm_struct *mm = vma->vm_mm;
2032         MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
2033         struct vm_area_struct *prev;
2034         int error = 0;
2035
2036         if (!(vma->vm_flags & VM_GROWSDOWN))
2037                 return -EFAULT;
2038
2039         address &= PAGE_MASK;
2040         if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
2041                 return -EPERM;
2042
2043         /* Enforce stack_guard_gap */
2044         prev = mas_prev(&mas, 0);
2045         /* Check that both stack segments have the same anon_vma? */
2046         if (prev) {
2047                 if (!(prev->vm_flags & VM_GROWSDOWN) &&
2048                     vma_is_accessible(prev) &&
2049                     (address - prev->vm_end < stack_guard_gap))
2050                         return -ENOMEM;
2051         }
2052
2053         if (mas_preallocate(&mas, GFP_KERNEL))
2054                 return -ENOMEM;
2055
2056         /* We must make sure the anon_vma is allocated. */
2057         if (unlikely(anon_vma_prepare(vma))) {
2058                 mas_destroy(&mas);
2059                 return -ENOMEM;
2060         }
2061
2062         /* Lock the VMA before expanding to prevent concurrent page faults */
2063         vma_start_write(vma);
2064         /*
2065          * vma->vm_start/vm_end cannot change under us because the caller
2066          * is required to hold the mmap_lock in read mode.  We need the
2067          * anon_vma lock to serialize against concurrent expand_stacks.
2068          */
2069         anon_vma_lock_write(vma->anon_vma);
2070
2071         /* Somebody else might have raced and expanded it already */
2072         if (address < vma->vm_start) {
2073                 unsigned long size, grow;
2074
2075                 size = vma->vm_end - address;
2076                 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2077
2078                 error = -ENOMEM;
2079                 if (grow <= vma->vm_pgoff) {
2080                         error = acct_stack_growth(vma, size, grow);
2081                         if (!error) {
2082                                 /*
2083                                  * We only hold a shared mmap_lock lock here, so
2084                                  * we need to protect against concurrent vma
2085                                  * expansions.  anon_vma_lock_write() doesn't
2086                                  * help here, as we don't guarantee that all
2087                                  * growable vmas in a mm share the same root
2088                                  * anon vma.  So, we reuse mm->page_table_lock
2089                                  * to guard against concurrent vma expansions.
2090                                  */
2091                                 spin_lock(&mm->page_table_lock);
2092                                 if (vma->vm_flags & VM_LOCKED)
2093                                         mm->locked_vm += grow;
2094                                 vm_stat_account(mm, vma->vm_flags, grow);
2095                                 anon_vma_interval_tree_pre_update_vma(vma);
2096                                 vma->vm_start = address;
2097                                 vma->vm_pgoff -= grow;
2098                                 /* Overwrite old entry in mtree. */
2099                                 mas_set_range(&mas, address, vma->vm_end - 1);
2100                                 mas_store_prealloc(&mas, vma);
2101                                 anon_vma_interval_tree_post_update_vma(vma);
2102                                 spin_unlock(&mm->page_table_lock);
2103
2104                                 perf_event_mmap(vma);
2105                         }
2106                 }
2107         }
2108         anon_vma_unlock_write(vma->anon_vma);
2109         khugepaged_enter_vma(vma, vma->vm_flags);
2110         mas_destroy(&mas);
2111         validate_mm(mm);
2112         return error;
2113 }
2114
2115 /* enforced gap between the expanding stack and other mappings. */
2116 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2117
2118 static int __init cmdline_parse_stack_guard_gap(char *p)
2119 {
2120         unsigned long val;
2121         char *endptr;
2122
2123         val = simple_strtoul(p, &endptr, 10);
2124         if (!*endptr)
2125                 stack_guard_gap = val << PAGE_SHIFT;
2126
2127         return 1;
2128 }
2129 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
2130
2131 #ifdef CONFIG_STACK_GROWSUP
2132 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2133 {
2134         return expand_upwards(vma, address);
2135 }
2136
2137 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2138 {
2139         struct vm_area_struct *vma, *prev;
2140
2141         addr &= PAGE_MASK;
2142         vma = find_vma_prev(mm, addr, &prev);
2143         if (vma && (vma->vm_start <= addr))
2144                 return vma;
2145         if (!prev)
2146                 return NULL;
2147         if (expand_stack_locked(prev, addr))
2148                 return NULL;
2149         if (prev->vm_flags & VM_LOCKED)
2150                 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2151         return prev;
2152 }
2153 #else
2154 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2155 {
2156         if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
2157                 return -EINVAL;
2158         return expand_downwards(vma, address);
2159 }
2160
2161 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2162 {
2163         struct vm_area_struct *vma;
2164         unsigned long start;
2165
2166         addr &= PAGE_MASK;
2167         vma = find_vma(mm, addr);
2168         if (!vma)
2169                 return NULL;
2170         if (vma->vm_start <= addr)
2171                 return vma;
2172         start = vma->vm_start;
2173         if (expand_stack_locked(vma, addr))
2174                 return NULL;
2175         if (vma->vm_flags & VM_LOCKED)
2176                 populate_vma_page_range(vma, addr, start, NULL);
2177         return vma;
2178 }
2179 #endif
2180
2181 /*
2182  * IA64 has some horrid mapping rules: it can expand both up and down,
2183  * but with various special rules.
2184  *
2185  * We'll get rid of this architecture eventually, so the ugliness is
2186  * temporary.
2187  */
2188 #ifdef CONFIG_IA64
2189 static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr)
2190 {
2191         return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) &&
2192                 REGION_OFFSET(addr) < RGN_MAP_LIMIT;
2193 }
2194
2195 /*
2196  * IA64 stacks grow down, but there's a special register backing store
2197  * that can grow up. Only sequentially, though, so the new address must
2198  * match vm_end.
2199  */
2200 static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr)
2201 {
2202         if (!vma_expand_ok(vma, addr))
2203                 return -EFAULT;
2204         if (vma->vm_end != (addr & PAGE_MASK))
2205                 return -EFAULT;
2206         return expand_upwards(vma, addr);
2207 }
2208
2209 static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr)
2210 {
2211         if (!vma_expand_ok(vma, addr))
2212                 return -EFAULT;
2213         return expand_downwards(vma, addr);
2214 }
2215
2216 #elif defined(CONFIG_STACK_GROWSUP)
2217
2218 #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
2219 #define vma_expand_down(vma, addr) (-EFAULT)
2220
2221 #else
2222
2223 #define vma_expand_up(vma,addr) (-EFAULT)
2224 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
2225
2226 #endif
2227
2228 /*
2229  * expand_stack(): legacy interface for page faulting. Don't use unless
2230  * you have to.
2231  *
2232  * This is called with the mm locked for reading, drops the lock, takes
2233  * the lock for writing, tries to look up a vma again, expands it if
2234  * necessary, and downgrades the lock to reading again.
2235  *
2236  * If no vma is found or it can't be expanded, it returns NULL and has
2237  * dropped the lock.
2238  */
2239 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
2240 {
2241         struct vm_area_struct *vma, *prev;
2242
2243         mmap_read_unlock(mm);
2244         if (mmap_write_lock_killable(mm))
2245                 return NULL;
2246
2247         vma = find_vma_prev(mm, addr, &prev);
2248         if (vma && vma->vm_start <= addr)
2249                 goto success;
2250
2251         if (prev && !vma_expand_up(prev, addr)) {
2252                 vma = prev;
2253                 goto success;
2254         }
2255
2256         if (vma && !vma_expand_down(vma, addr))
2257                 goto success;
2258
2259         mmap_write_unlock(mm);
2260         return NULL;
2261
2262 success:
2263         mmap_write_downgrade(mm);
2264         return vma;
2265 }
2266
2267 /*
2268  * Ok - we have the memory areas we should free on a maple tree so release them,
2269  * and do the vma updates.
2270  *
2271  * Called with the mm semaphore held.
2272  */
2273 static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
2274 {
2275         unsigned long nr_accounted = 0;
2276         struct vm_area_struct *vma;
2277
2278         /* Update high watermark before we lower total_vm */
2279         update_hiwater_vm(mm);
2280         mas_for_each(mas, vma, ULONG_MAX) {
2281                 long nrpages = vma_pages(vma);
2282
2283                 if (vma->vm_flags & VM_ACCOUNT)
2284                         nr_accounted += nrpages;
2285                 vm_stat_account(mm, vma->vm_flags, -nrpages);
2286                 remove_vma(vma, false);
2287         }
2288         vm_unacct_memory(nr_accounted);
2289 }
2290
2291 /*
2292  * Get rid of page table information in the indicated region.
2293  *
2294  * Called with the mm semaphore held.
2295  */
2296 static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
2297                 struct vm_area_struct *vma, struct vm_area_struct *prev,
2298                 struct vm_area_struct *next,
2299                 unsigned long start, unsigned long end, bool mm_wr_locked)
2300 {
2301         struct mmu_gather tlb;
2302
2303         lru_add_drain();
2304         tlb_gather_mmu(&tlb, mm);
2305         update_hiwater_rss(mm);
2306         unmap_vmas(&tlb, mt, vma, start, end, mm_wr_locked);
2307         free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2308                                  next ? next->vm_start : USER_PGTABLES_CEILING,
2309                                  mm_wr_locked);
2310         tlb_finish_mmu(&tlb);
2311 }
2312
2313 /*
2314  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
2315  * has already been checked or doesn't make sense to fail.
2316  * VMA Iterator will point to the end VMA.
2317  */
2318 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2319                 unsigned long addr, int new_below)
2320 {
2321         struct vma_prepare vp;
2322         struct vm_area_struct *new;
2323         int err;
2324
2325         WARN_ON(vma->vm_start >= addr);
2326         WARN_ON(vma->vm_end <= addr);
2327
2328         if (vma->vm_ops && vma->vm_ops->may_split) {
2329                 err = vma->vm_ops->may_split(vma, addr);
2330                 if (err)
2331                         return err;
2332         }
2333
2334         new = vm_area_dup(vma);
2335         if (!new)
2336                 return -ENOMEM;
2337
2338         err = -ENOMEM;
2339         if (vma_iter_prealloc(vmi))
2340                 goto out_free_vma;
2341
2342         if (new_below) {
2343                 new->vm_end = addr;
2344         } else {
2345                 new->vm_start = addr;
2346                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2347         }
2348
2349         err = vma_dup_policy(vma, new);
2350         if (err)
2351                 goto out_free_vmi;
2352
2353         err = anon_vma_clone(new, vma);
2354         if (err)
2355                 goto out_free_mpol;
2356
2357         if (new->vm_file)
2358                 get_file(new->vm_file);
2359
2360         if (new->vm_ops && new->vm_ops->open)
2361                 new->vm_ops->open(new);
2362
2363         init_vma_prep(&vp, vma);
2364         vp.insert = new;
2365         vma_prepare(&vp);
2366         vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
2367
2368         if (new_below) {
2369                 vma->vm_start = addr;
2370                 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
2371         } else {
2372                 vma->vm_end = addr;
2373         }
2374
2375         /* vma_complete stores the new vma */
2376         vma_complete(&vp, vmi, vma->vm_mm);
2377
2378         /* Success. */
2379         if (new_below)
2380                 vma_next(vmi);
2381         return 0;
2382
2383 out_free_mpol:
2384         mpol_put(vma_policy(new));
2385 out_free_vmi:
2386         vma_iter_free(vmi);
2387 out_free_vma:
2388         vm_area_free(new);
2389         return err;
2390 }
2391
2392 /*
2393  * Split a vma into two pieces at address 'addr', a new vma is allocated
2394  * either for the first part or the tail.
2395  */
2396 int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2397               unsigned long addr, int new_below)
2398 {
2399         if (vma->vm_mm->map_count >= sysctl_max_map_count)
2400                 return -ENOMEM;
2401
2402         return __split_vma(vmi, vma, addr, new_below);
2403 }
2404
2405 /*
2406  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
2407  * @vmi: The vma iterator
2408  * @vma: The starting vm_area_struct
2409  * @mm: The mm_struct
2410  * @start: The aligned start address to munmap.
2411  * @end: The aligned end address to munmap.
2412  * @uf: The userfaultfd list_head
2413  * @unlock: Set to true to drop the mmap_lock.  unlocking only happens on
2414  * success.
2415  *
2416  * Return: 0 on success and drops the lock if so directed, error and leaves the
2417  * lock held otherwise.
2418  */
2419 static int
2420 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
2421                     struct mm_struct *mm, unsigned long start,
2422                     unsigned long end, struct list_head *uf, bool unlock)
2423 {
2424         struct vm_area_struct *prev, *next = NULL;
2425         struct maple_tree mt_detach;
2426         int count = 0;
2427         int error = -ENOMEM;
2428         unsigned long locked_vm = 0;
2429         MA_STATE(mas_detach, &mt_detach, 0, 0);
2430         mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2431         mt_on_stack(mt_detach);
2432
2433         /*
2434          * If we need to split any vma, do it now to save pain later.
2435          *
2436          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2437          * unmapped vm_area_struct will remain in use: so lower split_vma
2438          * places tmp vma above, and higher split_vma places tmp vma below.
2439          */
2440
2441         /* Does it split the first one? */
2442         if (start > vma->vm_start) {
2443
2444                 /*
2445                  * Make sure that map_count on return from munmap() will
2446                  * not exceed its limit; but let map_count go just above
2447                  * its limit temporarily, to help free resources as expected.
2448                  */
2449                 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2450                         goto map_count_exceeded;
2451
2452                 error = __split_vma(vmi, vma, start, 0);
2453                 if (error)
2454                         goto start_split_failed;
2455
2456                 vma = vma_iter_load(vmi);
2457         }
2458
2459         prev = vma_prev(vmi);
2460         if (unlikely((!prev)))
2461                 vma_iter_set(vmi, start);
2462
2463         /*
2464          * Detach a range of VMAs from the mm. Using next as a temp variable as
2465          * it is always overwritten.
2466          */
2467         for_each_vma_range(*vmi, next, end) {
2468                 /* Does it split the end? */
2469                 if (next->vm_end > end) {
2470                         error = __split_vma(vmi, next, end, 0);
2471                         if (error)
2472                                 goto end_split_failed;
2473                 }
2474                 vma_start_write(next);
2475                 mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
2476                 error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
2477                 if (error)
2478                         goto munmap_gather_failed;
2479                 vma_mark_detached(next, true);
2480                 if (next->vm_flags & VM_LOCKED)
2481                         locked_vm += vma_pages(next);
2482
2483                 count++;
2484                 if (unlikely(uf)) {
2485                         /*
2486                          * If userfaultfd_unmap_prep returns an error the vmas
2487                          * will remain split, but userland will get a
2488                          * highly unexpected error anyway. This is no
2489                          * different than the case where the first of the two
2490                          * __split_vma fails, but we don't undo the first
2491                          * split, despite we could. This is unlikely enough
2492                          * failure that it's not worth optimizing it for.
2493                          */
2494                         error = userfaultfd_unmap_prep(next, start, end, uf);
2495
2496                         if (error)
2497                                 goto userfaultfd_error;
2498                 }
2499 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
2500                 BUG_ON(next->vm_start < start);
2501                 BUG_ON(next->vm_start > end);
2502 #endif
2503         }
2504
2505         if (vma_iter_end(vmi) > end)
2506                 next = vma_iter_load(vmi);
2507
2508         if (!next)
2509                 next = vma_next(vmi);
2510
2511 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
2512         /* Make sure no VMAs are about to be lost. */
2513         {
2514                 MA_STATE(test, &mt_detach, start, end - 1);
2515                 struct vm_area_struct *vma_mas, *vma_test;
2516                 int test_count = 0;
2517
2518                 vma_iter_set(vmi, start);
2519                 rcu_read_lock();
2520                 vma_test = mas_find(&test, end - 1);
2521                 for_each_vma_range(*vmi, vma_mas, end) {
2522                         BUG_ON(vma_mas != vma_test);
2523                         test_count++;
2524                         vma_test = mas_next(&test, end - 1);
2525                 }
2526                 rcu_read_unlock();
2527                 BUG_ON(count != test_count);
2528         }
2529 #endif
2530         vma_iter_set(vmi, start);
2531         error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
2532         if (error)
2533                 goto clear_tree_failed;
2534
2535         /* Point of no return */
2536         mm->locked_vm -= locked_vm;
2537         mm->map_count -= count;
2538         if (unlock)
2539                 mmap_write_downgrade(mm);
2540
2541         /*
2542          * We can free page tables without write-locking mmap_lock because VMAs
2543          * were isolated before we downgraded mmap_lock.
2544          */
2545         unmap_region(mm, &mt_detach, vma, prev, next, start, end, !unlock);
2546         /* Statistics and freeing VMAs */
2547         mas_set(&mas_detach, start);
2548         remove_mt(mm, &mas_detach);
2549         validate_mm(mm);
2550         if (unlock)
2551                 mmap_read_unlock(mm);
2552
2553         __mt_destroy(&mt_detach);
2554         return 0;
2555
2556 clear_tree_failed:
2557 userfaultfd_error:
2558 munmap_gather_failed:
2559 end_split_failed:
2560         mas_set(&mas_detach, 0);
2561         mas_for_each(&mas_detach, next, end)
2562                 vma_mark_detached(next, false);
2563
2564         __mt_destroy(&mt_detach);
2565 start_split_failed:
2566 map_count_exceeded:
2567         validate_mm(mm);
2568         return error;
2569 }
2570
2571 /*
2572  * do_vmi_munmap() - munmap a given range.
2573  * @vmi: The vma iterator
2574  * @mm: The mm_struct
2575  * @start: The start address to munmap
2576  * @len: The length of the range to munmap
2577  * @uf: The userfaultfd list_head
2578  * @unlock: set to true if the user wants to drop the mmap_lock on success
2579  *
2580  * This function takes a @mas that is either pointing to the previous VMA or set
2581  * to MA_START and sets it up to remove the mapping(s).  The @len will be
2582  * aligned and any arch_unmap work will be preformed.
2583  *
2584  * Return: 0 on success and drops the lock if so directed, error and leaves the
2585  * lock held otherwise.
2586  */
2587 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
2588                   unsigned long start, size_t len, struct list_head *uf,
2589                   bool unlock)
2590 {
2591         unsigned long end;
2592         struct vm_area_struct *vma;
2593
2594         if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2595                 return -EINVAL;
2596
2597         end = start + PAGE_ALIGN(len);
2598         if (end == start)
2599                 return -EINVAL;
2600
2601          /* arch_unmap() might do unmaps itself.  */
2602         arch_unmap(mm, start, end);
2603
2604         /* Find the first overlapping VMA */
2605         vma = vma_find(vmi, end);
2606         if (!vma) {
2607                 if (unlock)
2608                         mmap_write_unlock(mm);
2609                 return 0;
2610         }
2611
2612         return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
2613 }
2614
2615 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
2616  * @mm: The mm_struct
2617  * @start: The start address to munmap
2618  * @len: The length to be munmapped.
2619  * @uf: The userfaultfd list_head
2620  *
2621  * Return: 0 on success, error otherwise.
2622  */
2623 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2624               struct list_head *uf)
2625 {
2626         VMA_ITERATOR(vmi, mm, start);
2627
2628         return do_vmi_munmap(&vmi, mm, start, len, uf, false);
2629 }
2630
2631 unsigned long mmap_region(struct file *file, unsigned long addr,
2632                 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2633                 struct list_head *uf)
2634 {
2635         struct mm_struct *mm = current->mm;
2636         struct vm_area_struct *vma = NULL;
2637         struct vm_area_struct *next, *prev, *merge;
2638         pgoff_t pglen = len >> PAGE_SHIFT;
2639         unsigned long charged = 0;
2640         unsigned long end = addr + len;
2641         unsigned long merge_start = addr, merge_end = end;
2642         pgoff_t vm_pgoff;
2643         int error;
2644         VMA_ITERATOR(vmi, mm, addr);
2645
2646         /* Check against address space limit. */
2647         if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
2648                 unsigned long nr_pages;
2649
2650                 /*
2651                  * MAP_FIXED may remove pages of mappings that intersects with
2652                  * requested mapping. Account for the pages it would unmap.
2653                  */
2654                 nr_pages = count_vma_pages_range(mm, addr, end);
2655
2656                 if (!may_expand_vm(mm, vm_flags,
2657                                         (len >> PAGE_SHIFT) - nr_pages))
2658                         return -ENOMEM;
2659         }
2660
2661         /* Unmap any existing mapping in the area */
2662         if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
2663                 return -ENOMEM;
2664
2665         /*
2666          * Private writable mapping: check memory availability
2667          */
2668         if (accountable_mapping(file, vm_flags)) {
2669                 charged = len >> PAGE_SHIFT;
2670                 if (security_vm_enough_memory_mm(mm, charged))
2671                         return -ENOMEM;
2672                 vm_flags |= VM_ACCOUNT;
2673         }
2674
2675         next = vma_next(&vmi);
2676         prev = vma_prev(&vmi);
2677         if (vm_flags & VM_SPECIAL)
2678                 goto cannot_expand;
2679
2680         /* Attempt to expand an old mapping */
2681         /* Check next */
2682         if (next && next->vm_start == end && !vma_policy(next) &&
2683             can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
2684                                  NULL_VM_UFFD_CTX, NULL)) {
2685                 merge_end = next->vm_end;
2686                 vma = next;
2687                 vm_pgoff = next->vm_pgoff - pglen;
2688         }
2689
2690         /* Check prev */
2691         if (prev && prev->vm_end == addr && !vma_policy(prev) &&
2692             (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
2693                                        pgoff, vma->vm_userfaultfd_ctx, NULL) :
2694                    can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
2695                                        NULL_VM_UFFD_CTX, NULL))) {
2696                 merge_start = prev->vm_start;
2697                 vma = prev;
2698                 vm_pgoff = prev->vm_pgoff;
2699         }
2700
2701
2702         /* Actually expand, if possible */
2703         if (vma &&
2704             !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
2705                 khugepaged_enter_vma(vma, vm_flags);
2706                 goto expanded;
2707         }
2708
2709 cannot_expand:
2710         if (prev)
2711                 vma_iter_next_range(&vmi);
2712
2713         /*
2714          * Determine the object being mapped and call the appropriate
2715          * specific mapper. the address has already been validated, but
2716          * not unmapped, but the maps are removed from the list.
2717          */
2718         vma = vm_area_alloc(mm);
2719         if (!vma) {
2720                 error = -ENOMEM;
2721                 goto unacct_error;
2722         }
2723
2724         vma_iter_set(&vmi, addr);
2725         vma->vm_start = addr;
2726         vma->vm_end = end;
2727         vm_flags_init(vma, vm_flags);
2728         vma->vm_page_prot = vm_get_page_prot(vm_flags);
2729         vma->vm_pgoff = pgoff;
2730
2731         if (file) {
2732                 if (vm_flags & VM_SHARED) {
2733                         error = mapping_map_writable(file->f_mapping);
2734                         if (error)
2735                                 goto free_vma;
2736                 }
2737
2738                 vma->vm_file = get_file(file);
2739                 error = call_mmap(file, vma);
2740                 if (error)
2741                         goto unmap_and_free_vma;
2742
2743                 /*
2744                  * Expansion is handled above, merging is handled below.
2745                  * Drivers should not alter the address of the VMA.
2746                  */
2747                 error = -EINVAL;
2748                 if (WARN_ON((addr != vma->vm_start)))
2749                         goto close_and_free_vma;
2750
2751                 vma_iter_set(&vmi, addr);
2752                 /*
2753                  * If vm_flags changed after call_mmap(), we should try merge
2754                  * vma again as we may succeed this time.
2755                  */
2756                 if (unlikely(vm_flags != vma->vm_flags && prev)) {
2757                         merge = vma_merge(&vmi, mm, prev, vma->vm_start,
2758                                     vma->vm_end, vma->vm_flags, NULL,
2759                                     vma->vm_file, vma->vm_pgoff, NULL,
2760                                     NULL_VM_UFFD_CTX, NULL);
2761                         if (merge) {
2762                                 /*
2763                                  * ->mmap() can change vma->vm_file and fput
2764                                  * the original file. So fput the vma->vm_file
2765                                  * here or we would add an extra fput for file
2766                                  * and cause general protection fault
2767                                  * ultimately.
2768                                  */
2769                                 fput(vma->vm_file);
2770                                 vm_area_free(vma);
2771                                 vma = merge;
2772                                 /* Update vm_flags to pick up the change. */
2773                                 vm_flags = vma->vm_flags;
2774                                 goto unmap_writable;
2775                         }
2776                 }
2777
2778                 vm_flags = vma->vm_flags;
2779         } else if (vm_flags & VM_SHARED) {
2780                 error = shmem_zero_setup(vma);
2781                 if (error)
2782                         goto free_vma;
2783         } else {
2784                 vma_set_anonymous(vma);
2785         }
2786
2787         if (map_deny_write_exec(vma, vma->vm_flags)) {
2788                 error = -EACCES;
2789                 goto close_and_free_vma;
2790         }
2791
2792         /* Allow architectures to sanity-check the vm_flags */
2793         error = -EINVAL;
2794         if (!arch_validate_flags(vma->vm_flags))
2795                 goto close_and_free_vma;
2796
2797         error = -ENOMEM;
2798         if (vma_iter_prealloc(&vmi))
2799                 goto close_and_free_vma;
2800
2801         /* Lock the VMA since it is modified after insertion into VMA tree */
2802         vma_start_write(vma);
2803         vma_iter_store(&vmi, vma);
2804         mm->map_count++;
2805         if (vma->vm_file) {
2806                 i_mmap_lock_write(vma->vm_file->f_mapping);
2807                 if (vma->vm_flags & VM_SHARED)
2808                         mapping_allow_writable(vma->vm_file->f_mapping);
2809
2810                 flush_dcache_mmap_lock(vma->vm_file->f_mapping);
2811                 vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
2812                 flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
2813                 i_mmap_unlock_write(vma->vm_file->f_mapping);
2814         }
2815
2816         /*
2817          * vma_merge() calls khugepaged_enter_vma() either, the below
2818          * call covers the non-merge case.
2819          */
2820         khugepaged_enter_vma(vma, vma->vm_flags);
2821
2822         /* Once vma denies write, undo our temporary denial count */
2823 unmap_writable:
2824         if (file && vm_flags & VM_SHARED)
2825                 mapping_unmap_writable(file->f_mapping);
2826         file = vma->vm_file;
2827         ksm_add_vma(vma);
2828 expanded:
2829         perf_event_mmap(vma);
2830
2831         vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
2832         if (vm_flags & VM_LOCKED) {
2833                 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2834                                         is_vm_hugetlb_page(vma) ||
2835                                         vma == get_gate_vma(current->mm))
2836                         vm_flags_clear(vma, VM_LOCKED_MASK);
2837                 else
2838                         mm->locked_vm += (len >> PAGE_SHIFT);
2839         }
2840
2841         if (file)
2842                 uprobe_mmap(vma);
2843
2844         /*
2845          * New (or expanded) vma always get soft dirty status.
2846          * Otherwise user-space soft-dirty page tracker won't
2847          * be able to distinguish situation when vma area unmapped,
2848          * then new mapped in-place (which must be aimed as
2849          * a completely new data area).
2850          */
2851         vm_flags_set(vma, VM_SOFTDIRTY);
2852
2853         vma_set_page_prot(vma);
2854
2855         validate_mm(mm);
2856         return addr;
2857
2858 close_and_free_vma:
2859         if (file && vma->vm_ops && vma->vm_ops->close)
2860                 vma->vm_ops->close(vma);
2861
2862         if (file || vma->vm_file) {
2863 unmap_and_free_vma:
2864                 fput(vma->vm_file);
2865                 vma->vm_file = NULL;
2866
2867                 /* Undo any partial mapping done by a device driver. */
2868                 unmap_region(mm, &mm->mm_mt, vma, prev, next, vma->vm_start,
2869                              vma->vm_end, true);
2870         }
2871         if (file && (vm_flags & VM_SHARED))
2872                 mapping_unmap_writable(file->f_mapping);
2873 free_vma:
2874         vm_area_free(vma);
2875 unacct_error:
2876         if (charged)
2877                 vm_unacct_memory(charged);
2878         validate_mm(mm);
2879         return error;
2880 }
2881
2882 static int __vm_munmap(unsigned long start, size_t len, bool unlock)
2883 {
2884         int ret;
2885         struct mm_struct *mm = current->mm;
2886         LIST_HEAD(uf);
2887         VMA_ITERATOR(vmi, mm, start);
2888
2889         if (mmap_write_lock_killable(mm))
2890                 return -EINTR;
2891
2892         ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
2893         if (ret || !unlock)
2894                 mmap_write_unlock(mm);
2895
2896         userfaultfd_unmap_complete(mm, &uf);
2897         return ret;
2898 }
2899
2900 int vm_munmap(unsigned long start, size_t len)
2901 {
2902         return __vm_munmap(start, len, false);
2903 }
2904 EXPORT_SYMBOL(vm_munmap);
2905
2906 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2907 {
2908         addr = untagged_addr(addr);
2909         return __vm_munmap(addr, len, true);
2910 }
2911
2912
2913 /*
2914  * Emulation of deprecated remap_file_pages() syscall.
2915  */
2916 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2917                 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
2918 {
2919
2920         struct mm_struct *mm = current->mm;
2921         struct vm_area_struct *vma;
2922         unsigned long populate = 0;
2923         unsigned long ret = -EINVAL;
2924         struct file *file;
2925
2926         pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
2927                      current->comm, current->pid);
2928
2929         if (prot)
2930                 return ret;
2931         start = start & PAGE_MASK;
2932         size = size & PAGE_MASK;
2933
2934         if (start + size <= start)
2935                 return ret;
2936
2937         /* Does pgoff wrap? */
2938         if (pgoff + (size >> PAGE_SHIFT) < pgoff)
2939                 return ret;
2940
2941         if (mmap_write_lock_killable(mm))
2942                 return -EINTR;
2943
2944         vma = vma_lookup(mm, start);
2945
2946         if (!vma || !(vma->vm_flags & VM_SHARED))
2947                 goto out;
2948
2949         if (start + size > vma->vm_end) {
2950                 VMA_ITERATOR(vmi, mm, vma->vm_end);
2951                 struct vm_area_struct *next, *prev = vma;
2952
2953                 for_each_vma_range(vmi, next, start + size) {
2954                         /* hole between vmas ? */
2955                         if (next->vm_start != prev->vm_end)
2956                                 goto out;
2957
2958                         if (next->vm_file != vma->vm_file)
2959                                 goto out;
2960
2961                         if (next->vm_flags != vma->vm_flags)
2962                                 goto out;
2963
2964                         if (start + size <= next->vm_end)
2965                                 break;
2966
2967                         prev = next;
2968                 }
2969
2970                 if (!next)
2971                         goto out;
2972         }
2973
2974         prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
2975         prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
2976         prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
2977
2978         flags &= MAP_NONBLOCK;
2979         flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
2980         if (vma->vm_flags & VM_LOCKED)
2981                 flags |= MAP_LOCKED;
2982
2983         file = get_file(vma->vm_file);
2984         ret = do_mmap(vma->vm_file, start, size,
2985                         prot, flags, pgoff, &populate, NULL);
2986         fput(file);
2987 out:
2988         mmap_write_unlock(mm);
2989         if (populate)
2990                 mm_populate(ret, populate);
2991         if (!IS_ERR_VALUE(ret))
2992                 ret = 0;
2993         return ret;
2994 }
2995
2996 /*
2997  * do_vma_munmap() - Unmap a full or partial vma.
2998  * @vmi: The vma iterator pointing at the vma
2999  * @vma: The first vma to be munmapped
3000  * @start: the start of the address to unmap
3001  * @end: The end of the address to unmap
3002  * @uf: The userfaultfd list_head
3003  * @unlock: Drop the lock on success
3004  *
3005  * unmaps a VMA mapping when the vma iterator is already in position.
3006  * Does not handle alignment.
3007  *
3008  * Return: 0 on success drops the lock of so directed, error on failure and will
3009  * still hold the lock.
3010  */
3011 int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3012                 unsigned long start, unsigned long end, struct list_head *uf,
3013                 bool unlock)
3014 {
3015         struct mm_struct *mm = vma->vm_mm;
3016
3017         arch_unmap(mm, start, end);
3018         return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
3019 }
3020
3021 /*
3022  * do_brk_flags() - Increase the brk vma if the flags match.
3023  * @vmi: The vma iterator
3024  * @addr: The start address
3025  * @len: The length of the increase
3026  * @vma: The vma,
3027  * @flags: The VMA Flags
3028  *
3029  * Extend the brk VMA from addr to addr + len.  If the VMA is NULL or the flags
3030  * do not match then create a new anonymous VMA.  Eventually we may be able to
3031  * do some brk-specific accounting here.
3032  */
3033 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
3034                 unsigned long addr, unsigned long len, unsigned long flags)
3035 {
3036         struct mm_struct *mm = current->mm;
3037         struct vma_prepare vp;
3038
3039         /*
3040          * Check against address space limits by the changed size
3041          * Note: This happens *after* clearing old mappings in some code paths.
3042          */
3043         flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
3044         if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
3045                 return -ENOMEM;
3046
3047         if (mm->map_count > sysctl_max_map_count)
3048                 return -ENOMEM;
3049
3050         if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
3051                 return -ENOMEM;
3052
3053         /*
3054          * Expand the existing vma if possible; Note that singular lists do not
3055          * occur after forking, so the expand will only happen on new VMAs.
3056          */
3057         if (vma && vma->vm_end == addr && !vma_policy(vma) &&
3058             can_vma_merge_after(vma, flags, NULL, NULL,
3059                                 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
3060                 if (vma_iter_prealloc(vmi))
3061                         goto unacct_fail;
3062
3063                 init_vma_prep(&vp, vma);
3064                 vma_prepare(&vp);
3065                 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
3066                 vma->vm_end = addr + len;
3067                 vm_flags_set(vma, VM_SOFTDIRTY);
3068                 vma_iter_store(vmi, vma);
3069
3070                 vma_complete(&vp, vmi, mm);
3071                 khugepaged_enter_vma(vma, flags);
3072                 goto out;
3073         }
3074
3075         /* create a vma struct for an anonymous mapping */
3076         vma = vm_area_alloc(mm);
3077         if (!vma)
3078                 goto unacct_fail;
3079
3080         vma_set_anonymous(vma);
3081         vma->vm_start = addr;
3082         vma->vm_end = addr + len;
3083         vma->vm_pgoff = addr >> PAGE_SHIFT;
3084         vm_flags_init(vma, flags);
3085         vma->vm_page_prot = vm_get_page_prot(flags);
3086         if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
3087                 goto mas_store_fail;
3088
3089         mm->map_count++;
3090         validate_mm(mm);
3091         ksm_add_vma(vma);
3092 out:
3093         perf_event_mmap(vma);
3094         mm->total_vm += len >> PAGE_SHIFT;
3095         mm->data_vm += len >> PAGE_SHIFT;
3096         if (flags & VM_LOCKED)
3097                 mm->locked_vm += (len >> PAGE_SHIFT);
3098         vm_flags_set(vma, VM_SOFTDIRTY);
3099         return 0;
3100
3101 mas_store_fail:
3102         vm_area_free(vma);
3103 unacct_fail:
3104         vm_unacct_memory(len >> PAGE_SHIFT);
3105         return -ENOMEM;
3106 }
3107
3108 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3109 {
3110         struct mm_struct *mm = current->mm;
3111         struct vm_area_struct *vma = NULL;
3112         unsigned long len;
3113         int ret;
3114         bool populate;
3115         LIST_HEAD(uf);
3116         VMA_ITERATOR(vmi, mm, addr);
3117
3118         len = PAGE_ALIGN(request);
3119         if (len < request)
3120                 return -ENOMEM;
3121         if (!len)
3122                 return 0;
3123
3124         if (mmap_write_lock_killable(mm))
3125                 return -EINTR;
3126
3127         /* Until we need other flags, refuse anything except VM_EXEC. */
3128         if ((flags & (~VM_EXEC)) != 0)
3129                 return -EINVAL;
3130
3131         ret = check_brk_limits(addr, len);
3132         if (ret)
3133                 goto limits_failed;
3134
3135         ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
3136         if (ret)
3137                 goto munmap_failed;
3138
3139         vma = vma_prev(&vmi);
3140         ret = do_brk_flags(&vmi, vma, addr, len, flags);
3141         populate = ((mm->def_flags & VM_LOCKED) != 0);
3142         mmap_write_unlock(mm);
3143         userfaultfd_unmap_complete(mm, &uf);
3144         if (populate && !ret)
3145                 mm_populate(addr, len);
3146         return ret;
3147
3148 munmap_failed:
3149 limits_failed:
3150         mmap_write_unlock(mm);
3151         return ret;
3152 }
3153 EXPORT_SYMBOL(vm_brk_flags);
3154
3155 int vm_brk(unsigned long addr, unsigned long len)
3156 {
3157         return vm_brk_flags(addr, len, 0);
3158 }
3159 EXPORT_SYMBOL(vm_brk);
3160
3161 /* Release all mmaps. */
3162 void exit_mmap(struct mm_struct *mm)
3163 {
3164         struct mmu_gather tlb;
3165         struct vm_area_struct *vma;
3166         unsigned long nr_accounted = 0;
3167         MA_STATE(mas, &mm->mm_mt, 0, 0);
3168         int count = 0;
3169
3170         /* mm's last user has gone, and its about to be pulled down */
3171         mmu_notifier_release(mm);
3172
3173         mmap_read_lock(mm);
3174         arch_exit_mmap(mm);
3175
3176         vma = mas_find(&mas, ULONG_MAX);
3177         if (!vma) {
3178                 /* Can happen if dup_mmap() received an OOM */
3179                 mmap_read_unlock(mm);
3180                 return;
3181         }
3182
3183         lru_add_drain();
3184         flush_cache_mm(mm);
3185         tlb_gather_mmu_fullmm(&tlb, mm);
3186         /* update_hiwater_rss(mm) here? but nobody should be looking */
3187         /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
3188         unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX, false);
3189         mmap_read_unlock(mm);
3190
3191         /*
3192          * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
3193          * because the memory has been already freed.
3194          */
3195         set_bit(MMF_OOM_SKIP, &mm->flags);
3196         mmap_write_lock(mm);
3197         mt_clear_in_rcu(&mm->mm_mt);
3198         free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
3199                       USER_PGTABLES_CEILING, true);
3200         tlb_finish_mmu(&tlb);
3201
3202         /*
3203          * Walk the list again, actually closing and freeing it, with preemption
3204          * enabled, without holding any MM locks besides the unreachable
3205          * mmap_write_lock.
3206          */
3207         do {
3208                 if (vma->vm_flags & VM_ACCOUNT)
3209                         nr_accounted += vma_pages(vma);
3210                 remove_vma(vma, true);
3211                 count++;
3212                 cond_resched();
3213         } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
3214
3215         BUG_ON(count != mm->map_count);
3216
3217         trace_exit_mmap(mm);
3218         __mt_destroy(&mm->mm_mt);
3219         mmap_write_unlock(mm);
3220         vm_unacct_memory(nr_accounted);
3221 }
3222
3223 /* Insert vm structure into process list sorted by address
3224  * and into the inode's i_mmap tree.  If vm_file is non-NULL
3225  * then i_mmap_rwsem is taken here.
3226  */
3227 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3228 {
3229         unsigned long charged = vma_pages(vma);
3230
3231
3232         if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3233                 return -ENOMEM;
3234
3235         if ((vma->vm_flags & VM_ACCOUNT) &&
3236              security_vm_enough_memory_mm(mm, charged))
3237                 return -ENOMEM;
3238
3239         /*
3240          * The vm_pgoff of a purely anonymous vma should be irrelevant
3241          * until its first write fault, when page's anon_vma and index
3242          * are set.  But now set the vm_pgoff it will almost certainly
3243          * end up with (unless mremap moves it elsewhere before that
3244          * first wfault), so /proc/pid/maps tells a consistent story.
3245          *
3246          * By setting it to reflect the virtual start address of the
3247          * vma, merges and splits can happen in a seamless way, just
3248          * using the existing file pgoff checks and manipulations.
3249          * Similarly in do_mmap and in do_brk_flags.
3250          */
3251         if (vma_is_anonymous(vma)) {
3252                 BUG_ON(vma->anon_vma);
3253                 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3254         }
3255
3256         if (vma_link(mm, vma)) {
3257                 vm_unacct_memory(charged);
3258                 return -ENOMEM;
3259         }
3260
3261         return 0;
3262 }
3263
3264 /*
3265  * Copy the vma structure to a new location in the same mm,
3266  * prior to moving page table entries, to effect an mremap move.
3267  */
3268 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
3269         unsigned long addr, unsigned long len, pgoff_t pgoff,
3270         bool *need_rmap_locks)
3271 {
3272         struct vm_area_struct *vma = *vmap;
3273         unsigned long vma_start = vma->vm_start;
3274         struct mm_struct *mm = vma->vm_mm;
3275         struct vm_area_struct *new_vma, *prev;
3276         bool faulted_in_anon_vma = true;
3277         VMA_ITERATOR(vmi, mm, addr);
3278
3279         /*
3280          * If anonymous vma has not yet been faulted, update new pgoff
3281          * to match new location, to increase its chance of merging.
3282          */
3283         if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3284                 pgoff = addr >> PAGE_SHIFT;
3285                 faulted_in_anon_vma = false;
3286         }
3287
3288         new_vma = find_vma_prev(mm, addr, &prev);
3289         if (new_vma && new_vma->vm_start < addr + len)
3290                 return NULL;    /* should never get here */
3291
3292         new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags,
3293                             vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
3294                             vma->vm_userfaultfd_ctx, anon_vma_name(vma));
3295         if (new_vma) {
3296                 /*
3297                  * Source vma may have been merged into new_vma
3298                  */
3299                 if (unlikely(vma_start >= new_vma->vm_start &&
3300                              vma_start < new_vma->vm_end)) {
3301                         /*
3302                          * The only way we can get a vma_merge with
3303                          * self during an mremap is if the vma hasn't
3304                          * been faulted in yet and we were allowed to
3305                          * reset the dst vma->vm_pgoff to the
3306                          * destination address of the mremap to allow
3307                          * the merge to happen. mremap must change the
3308                          * vm_pgoff linearity between src and dst vmas
3309                          * (in turn preventing a vma_merge) to be
3310                          * safe. It is only safe to keep the vm_pgoff
3311                          * linear if there are no pages mapped yet.
3312                          */
3313                         VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
3314                         *vmap = vma = new_vma;
3315                 }
3316                 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3317         } else {
3318                 new_vma = vm_area_dup(vma);
3319                 if (!new_vma)
3320                         goto out;
3321                 new_vma->vm_start = addr;
3322                 new_vma->vm_end = addr + len;
3323                 new_vma->vm_pgoff = pgoff;
3324                 if (vma_dup_policy(vma, new_vma))
3325                         goto out_free_vma;
3326                 if (anon_vma_clone(new_vma, vma))
3327                         goto out_free_mempol;
3328                 if (new_vma->vm_file)
3329                         get_file(new_vma->vm_file);
3330                 if (new_vma->vm_ops && new_vma->vm_ops->open)
3331                         new_vma->vm_ops->open(new_vma);
3332                 vma_start_write(new_vma);
3333                 if (vma_link(mm, new_vma))
3334                         goto out_vma_link;
3335                 *need_rmap_locks = false;
3336         }
3337         return new_vma;
3338
3339 out_vma_link:
3340         if (new_vma->vm_ops && new_vma->vm_ops->close)
3341                 new_vma->vm_ops->close(new_vma);
3342
3343         if (new_vma->vm_file)
3344                 fput(new_vma->vm_file);
3345
3346         unlink_anon_vmas(new_vma);
3347 out_free_mempol:
3348         mpol_put(vma_policy(new_vma));
3349 out_free_vma:
3350         vm_area_free(new_vma);
3351 out:
3352         return NULL;
3353 }
3354
3355 /*
3356  * Return true if the calling process may expand its vm space by the passed
3357  * number of pages
3358  */
3359 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3360 {
3361         if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3362                 return false;
3363
3364         if (is_data_mapping(flags) &&
3365             mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3366                 /* Workaround for Valgrind */
3367                 if (rlimit(RLIMIT_DATA) == 0 &&
3368                     mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3369                         return true;
3370
3371                 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
3372                              current->comm, current->pid,
3373                              (mm->data_vm + npages) << PAGE_SHIFT,
3374                              rlimit(RLIMIT_DATA),
3375                              ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3376
3377                 if (!ignore_rlimit_data)
3378                         return false;
3379         }
3380
3381         return true;
3382 }
3383
3384 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3385 {
3386         WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
3387
3388         if (is_exec_mapping(flags))
3389                 mm->exec_vm += npages;
3390         else if (is_stack_mapping(flags))
3391                 mm->stack_vm += npages;
3392         else if (is_data_mapping(flags))
3393                 mm->data_vm += npages;
3394 }
3395
3396 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3397
3398 /*
3399  * Having a close hook prevents vma merging regardless of flags.
3400  */
3401 static void special_mapping_close(struct vm_area_struct *vma)
3402 {
3403 }
3404
3405 static const char *special_mapping_name(struct vm_area_struct *vma)
3406 {
3407         return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3408 }
3409
3410 static int special_mapping_mremap(struct vm_area_struct *new_vma)
3411 {
3412         struct vm_special_mapping *sm = new_vma->vm_private_data;
3413
3414         if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3415                 return -EFAULT;
3416
3417         if (sm->mremap)
3418                 return sm->mremap(sm, new_vma);
3419
3420         return 0;
3421 }
3422
3423 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
3424 {
3425         /*
3426          * Forbid splitting special mappings - kernel has expectations over
3427          * the number of pages in mapping. Together with VM_DONTEXPAND
3428          * the size of vma should stay the same over the special mapping's
3429          * lifetime.
3430          */
3431         return -EINVAL;
3432 }
3433
3434 static const struct vm_operations_struct special_mapping_vmops = {
3435         .close = special_mapping_close,
3436         .fault = special_mapping_fault,
3437         .mremap = special_mapping_mremap,
3438         .name = special_mapping_name,
3439         /* vDSO code relies that VVAR can't be accessed remotely */
3440         .access = NULL,
3441         .may_split = special_mapping_split,
3442 };
3443
3444 static const struct vm_operations_struct legacy_special_mapping_vmops = {
3445         .close = special_mapping_close,
3446         .fault = special_mapping_fault,
3447 };
3448
3449 static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
3450 {
3451         struct vm_area_struct *vma = vmf->vma;
3452         pgoff_t pgoff;
3453         struct page **pages;
3454
3455         if (vma->vm_ops == &legacy_special_mapping_vmops) {
3456                 pages = vma->vm_private_data;
3457         } else {
3458                 struct vm_special_mapping *sm = vma->vm_private_data;
3459
3460                 if (sm->fault)
3461                         return sm->fault(sm, vmf->vma, vmf);
3462
3463                 pages = sm->pages;
3464         }
3465
3466         for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
3467                 pgoff--;
3468
3469         if (*pages) {
3470                 struct page *page = *pages;
3471                 get_page(page);
3472                 vmf->page = page;
3473                 return 0;
3474         }
3475
3476         return VM_FAULT_SIGBUS;
3477 }
3478
3479 static struct vm_area_struct *__install_special_mapping(
3480         struct mm_struct *mm,
3481         unsigned long addr, unsigned long len,
3482         unsigned long vm_flags, void *priv,
3483         const struct vm_operations_struct *ops)
3484 {
3485         int ret;
3486         struct vm_area_struct *vma;
3487
3488         vma = vm_area_alloc(mm);
3489         if (unlikely(vma == NULL))
3490                 return ERR_PTR(-ENOMEM);
3491
3492         vma->vm_start = addr;
3493         vma->vm_end = addr + len;
3494
3495         vm_flags_init(vma, (vm_flags | mm->def_flags |
3496                       VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
3497         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3498
3499         vma->vm_ops = ops;
3500         vma->vm_private_data = priv;
3501
3502         ret = insert_vm_struct(mm, vma);
3503         if (ret)
3504                 goto out;
3505
3506         vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3507
3508         perf_event_mmap(vma);
3509
3510         return vma;
3511
3512 out:
3513         vm_area_free(vma);
3514         return ERR_PTR(ret);
3515 }
3516
3517 bool vma_is_special_mapping(const struct vm_area_struct *vma,
3518         const struct vm_special_mapping *sm)
3519 {
3520         return vma->vm_private_data == sm &&
3521                 (vma->vm_ops == &special_mapping_vmops ||
3522                  vma->vm_ops == &legacy_special_mapping_vmops);
3523 }
3524
3525 /*
3526  * Called with mm->mmap_lock held for writing.
3527  * Insert a new vma covering the given region, with the given flags.
3528  * Its pages are supplied by the given array of struct page *.
3529  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
3530  * The region past the last page supplied will always produce SIGBUS.
3531  * The array pointer and the pages it points to are assumed to stay alive
3532  * for as long as this mapping might exist.
3533  */
3534 struct vm_area_struct *_install_special_mapping(
3535         struct mm_struct *mm,
3536         unsigned long addr, unsigned long len,
3537         unsigned long vm_flags, const struct vm_special_mapping *spec)
3538 {
3539         return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
3540                                         &special_mapping_vmops);
3541 }
3542
3543 int install_special_mapping(struct mm_struct *mm,
3544                             unsigned long addr, unsigned long len,
3545                             unsigned long vm_flags, struct page **pages)
3546 {
3547         struct vm_area_struct *vma = __install_special_mapping(
3548                 mm, addr, len, vm_flags, (void *)pages,
3549                 &legacy_special_mapping_vmops);
3550
3551         return PTR_ERR_OR_ZERO(vma);
3552 }
3553
3554 static DEFINE_MUTEX(mm_all_locks_mutex);
3555
3556 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3557 {
3558         if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3559                 /*
3560                  * The LSB of head.next can't change from under us
3561                  * because we hold the mm_all_locks_mutex.
3562                  */
3563                 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
3564                 /*
3565                  * We can safely modify head.next after taking the
3566                  * anon_vma->root->rwsem. If some other vma in this mm shares
3567                  * the same anon_vma we won't take it again.
3568                  *
3569                  * No need of atomic instructions here, head.next
3570                  * can't change from under us thanks to the
3571                  * anon_vma->root->rwsem.
3572                  */
3573                 if (__test_and_set_bit(0, (unsigned long *)
3574                                        &anon_vma->root->rb_root.rb_root.rb_node))
3575                         BUG();
3576         }
3577 }
3578
3579 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3580 {
3581         if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3582                 /*
3583                  * AS_MM_ALL_LOCKS can't change from under us because
3584                  * we hold the mm_all_locks_mutex.
3585                  *
3586                  * Operations on ->flags have to be atomic because
3587                  * even if AS_MM_ALL_LOCKS is stable thanks to the
3588                  * mm_all_locks_mutex, there may be other cpus
3589                  * changing other bitflags in parallel to us.
3590                  */
3591                 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3592                         BUG();
3593                 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
3594         }
3595 }
3596
3597 /*
3598  * This operation locks against the VM for all pte/vma/mm related
3599  * operations that could ever happen on a certain mm. This includes
3600  * vmtruncate, try_to_unmap, and all page faults.
3601  *
3602  * The caller must take the mmap_lock in write mode before calling
3603  * mm_take_all_locks(). The caller isn't allowed to release the
3604  * mmap_lock until mm_drop_all_locks() returns.
3605  *
3606  * mmap_lock in write mode is required in order to block all operations
3607  * that could modify pagetables and free pages without need of
3608  * altering the vma layout. It's also needed in write mode to avoid new
3609  * anon_vmas to be associated with existing vmas.
3610  *
3611  * A single task can't take more than one mm_take_all_locks() in a row
3612  * or it would deadlock.
3613  *
3614  * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3615  * mapping->flags avoid to take the same lock twice, if more than one
3616  * vma in this mm is backed by the same anon_vma or address_space.
3617  *
3618  * We take locks in following order, accordingly to comment at beginning
3619  * of mm/rmap.c:
3620  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
3621  *     hugetlb mapping);
3622  *   - all vmas marked locked
3623  *   - all i_mmap_rwsem locks;
3624  *   - all anon_vma->rwseml
3625  *
3626  * We can take all locks within these types randomly because the VM code
3627  * doesn't nest them and we protected from parallel mm_take_all_locks() by
3628  * mm_all_locks_mutex.
3629  *
3630  * mm_take_all_locks() and mm_drop_all_locks are expensive operations
3631  * that may have to take thousand of locks.
3632  *
3633  * mm_take_all_locks() can fail if it's interrupted by signals.
3634  */
3635 int mm_take_all_locks(struct mm_struct *mm)
3636 {
3637         struct vm_area_struct *vma;
3638         struct anon_vma_chain *avc;
3639         MA_STATE(mas, &mm->mm_mt, 0, 0);
3640
3641         mmap_assert_write_locked(mm);
3642
3643         mutex_lock(&mm_all_locks_mutex);
3644
3645         mas_for_each(&mas, vma, ULONG_MAX) {
3646                 if (signal_pending(current))
3647                         goto out_unlock;
3648                 vma_start_write(vma);
3649         }
3650
3651         mas_set(&mas, 0);
3652         mas_for_each(&mas, vma, ULONG_MAX) {
3653                 if (signal_pending(current))
3654                         goto out_unlock;
3655                 if (vma->vm_file && vma->vm_file->f_mapping &&
3656                                 is_vm_hugetlb_page(vma))
3657                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
3658         }
3659
3660         mas_set(&mas, 0);
3661         mas_for_each(&mas, vma, ULONG_MAX) {
3662                 if (signal_pending(current))
3663                         goto out_unlock;
3664                 if (vma->vm_file && vma->vm_file->f_mapping &&
3665                                 !is_vm_hugetlb_page(vma))
3666                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
3667         }
3668
3669         mas_set(&mas, 0);
3670         mas_for_each(&mas, vma, ULONG_MAX) {
3671                 if (signal_pending(current))
3672                         goto out_unlock;
3673                 if (vma->anon_vma)
3674                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3675                                 vm_lock_anon_vma(mm, avc->anon_vma);
3676         }
3677
3678         return 0;
3679
3680 out_unlock:
3681         mm_drop_all_locks(mm);
3682         return -EINTR;
3683 }
3684
3685 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3686 {
3687         if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3688                 /*
3689                  * The LSB of head.next can't change to 0 from under
3690                  * us because we hold the mm_all_locks_mutex.
3691                  *
3692                  * We must however clear the bitflag before unlocking
3693                  * the vma so the users using the anon_vma->rb_root will
3694                  * never see our bitflag.
3695                  *
3696                  * No need of atomic instructions here, head.next
3697                  * can't change from under us until we release the
3698                  * anon_vma->root->rwsem.
3699                  */
3700                 if (!__test_and_clear_bit(0, (unsigned long *)
3701                                           &anon_vma->root->rb_root.rb_root.rb_node))
3702                         BUG();
3703                 anon_vma_unlock_write(anon_vma);
3704         }
3705 }
3706
3707 static void vm_unlock_mapping(struct address_space *mapping)
3708 {
3709         if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3710                 /*
3711                  * AS_MM_ALL_LOCKS can't change to 0 from under us
3712                  * because we hold the mm_all_locks_mutex.
3713                  */
3714                 i_mmap_unlock_write(mapping);
3715                 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3716                                         &mapping->flags))
3717                         BUG();
3718         }
3719 }
3720
3721 /*
3722  * The mmap_lock cannot be released by the caller until
3723  * mm_drop_all_locks() returns.
3724  */
3725 void mm_drop_all_locks(struct mm_struct *mm)
3726 {
3727         struct vm_area_struct *vma;
3728         struct anon_vma_chain *avc;
3729         MA_STATE(mas, &mm->mm_mt, 0, 0);
3730
3731         mmap_assert_write_locked(mm);
3732         BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3733
3734         mas_for_each(&mas, vma, ULONG_MAX) {
3735                 if (vma->anon_vma)
3736                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3737                                 vm_unlock_anon_vma(avc->anon_vma);
3738                 if (vma->vm_file && vma->vm_file->f_mapping)
3739                         vm_unlock_mapping(vma->vm_file->f_mapping);
3740         }
3741         vma_end_write_all(mm);
3742
3743         mutex_unlock(&mm_all_locks_mutex);
3744 }
3745
3746 /*
3747  * initialise the percpu counter for VM
3748  */
3749 void __init mmap_init(void)
3750 {
3751         int ret;
3752
3753         ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3754         VM_BUG_ON(ret);
3755 }
3756
3757 /*
3758  * Initialise sysctl_user_reserve_kbytes.
3759  *
3760  * This is intended to prevent a user from starting a single memory hogging
3761  * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
3762  * mode.
3763  *
3764  * The default value is min(3% of free memory, 128MB)
3765  * 128MB is enough to recover with sshd/login, bash, and top/kill.
3766  */
3767 static int init_user_reserve(void)
3768 {
3769         unsigned long free_kbytes;
3770
3771         free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3772
3773         sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3774         return 0;
3775 }
3776 subsys_initcall(init_user_reserve);
3777
3778 /*
3779  * Initialise sysctl_admin_reserve_kbytes.
3780  *
3781  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3782  * to log in and kill a memory hogging process.
3783  *
3784  * Systems with more than 256MB will reserve 8MB, enough to recover
3785  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
3786  * only reserve 3% of free pages by default.
3787  */
3788 static int init_admin_reserve(void)
3789 {
3790         unsigned long free_kbytes;
3791
3792         free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3793
3794         sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3795         return 0;
3796 }
3797 subsys_initcall(init_admin_reserve);
3798
3799 /*
3800  * Reinititalise user and admin reserves if memory is added or removed.
3801  *
3802  * The default user reserve max is 128MB, and the default max for the
3803  * admin reserve is 8MB. These are usually, but not always, enough to
3804  * enable recovery from a memory hogging process using login/sshd, a shell,
3805  * and tools like top. It may make sense to increase or even disable the
3806  * reserve depending on the existence of swap or variations in the recovery
3807  * tools. So, the admin may have changed them.
3808  *
3809  * If memory is added and the reserves have been eliminated or increased above
3810  * the default max, then we'll trust the admin.
3811  *
3812  * If memory is removed and there isn't enough free memory, then we
3813  * need to reset the reserves.
3814  *
3815  * Otherwise keep the reserve set by the admin.
3816  */
3817 static int reserve_mem_notifier(struct notifier_block *nb,
3818                              unsigned long action, void *data)
3819 {
3820         unsigned long tmp, free_kbytes;
3821
3822         switch (action) {
3823         case MEM_ONLINE:
3824                 /* Default max is 128MB. Leave alone if modified by operator. */
3825                 tmp = sysctl_user_reserve_kbytes;
3826                 if (0 < tmp && tmp < (1UL << 17))
3827                         init_user_reserve();
3828
3829                 /* Default max is 8MB.  Leave alone if modified by operator. */
3830                 tmp = sysctl_admin_reserve_kbytes;
3831                 if (0 < tmp && tmp < (1UL << 13))
3832                         init_admin_reserve();
3833
3834                 break;
3835         case MEM_OFFLINE:
3836                 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3837
3838                 if (sysctl_user_reserve_kbytes > free_kbytes) {
3839                         init_user_reserve();
3840                         pr_info("vm.user_reserve_kbytes reset to %lu\n",
3841                                 sysctl_user_reserve_kbytes);
3842                 }
3843
3844                 if (sysctl_admin_reserve_kbytes > free_kbytes) {
3845                         init_admin_reserve();
3846                         pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3847                                 sysctl_admin_reserve_kbytes);
3848                 }
3849                 break;
3850         default:
3851                 break;
3852         }
3853         return NOTIFY_OK;
3854 }
3855
3856 static int __meminit init_reserve_notifier(void)
3857 {
3858         if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
3859                 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3860
3861         return 0;
3862 }
3863 subsys_initcall(init_reserve_notifier);
This page took 0.252027 seconds and 4 git commands to generate.