[linux.git] / mm / vma.c (Linux 6.14-rc3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /*
4  * VMA-specific functions.
5  */
6
7 #include "vma_internal.h"
8 #include "vma.h"
9
10 struct mmap_state {
11         struct mm_struct *mm;
12         struct vma_iterator *vmi;
13
14         unsigned long addr;
15         unsigned long end;
16         pgoff_t pgoff;
17         unsigned long pglen;
18         unsigned long flags;
19         struct file *file;
20
21         unsigned long charged;
22         bool retry_merge;
23
24         struct vm_area_struct *prev;
25         struct vm_area_struct *next;
26
27         /* Unmapping state. */
28         struct vma_munmap_struct vms;
29         struct ma_state mas_detach;
30         struct maple_tree mt_detach;
31 };
32
33 #define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, flags_, file_) \
34         struct mmap_state name = {                                      \
35                 .mm = mm_,                                              \
36                 .vmi = vmi_,                                            \
37                 .addr = addr_,                                          \
38                 .end = (addr_) + (len_),                                \
39                 .pgoff = pgoff_,                                        \
40                 .pglen = PHYS_PFN(len_),                                \
41                 .flags = flags_,                                        \
42                 .file = file_,                                          \
43         }
44
45 #define VMG_MMAP_STATE(name, map_, vma_)                                \
46         struct vma_merge_struct name = {                                \
47                 .mm = (map_)->mm,                                       \
48                 .vmi = (map_)->vmi,                                     \
49                 .start = (map_)->addr,                                  \
50                 .end = (map_)->end,                                     \
51                 .flags = (map_)->flags,                                 \
52                 .pgoff = (map_)->pgoff,                                 \
53                 .file = (map_)->file,                                   \
54                 .prev = (map_)->prev,                                   \
55                 .vma = vma_,                                            \
56                 .next = (vma_) ? NULL : (map_)->next,                   \
57                 .state = VMA_MERGE_START,                               \
58                 .merge_flags = VMG_FLAG_DEFAULT,                        \
59         }
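
/*
 * Illustrative sketch (not part of this file): how the two state macros are
 * typically chained by an mmap()-style caller.  The locals and the elided
 * setup are hypothetical; only identifiers that already appear in this file
 * are relied upon.
 *
 *      VMA_ITERATOR(vmi, mm, addr);
 *      MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
 *
 *      ... populate map.prev/map.next from the surrounding VMAs ...
 *
 *      VMG_MMAP_STATE(vmg, &map, NULL);
 *      vma = vma_merge_new_range(&vmg);
 *      if (!vma)
 *              ... fall back to allocating and inserting a fresh VMA ...
 */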
60
61 static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
62 {
63         struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
64
65         if (!mpol_equal(vmg->policy, vma_policy(vma)))
66                 return false;
67         /*
68          * VM_SOFTDIRTY should not prevent VMA merging if the flags match
69          * in everything except the dirty bit -- the caller should mark the
70          * merged VMA as dirty. If the dirty bit were not excluded from the
71          * comparison, we would increase pressure on the memory system,
72          * forcing the kernel to generate new VMAs where old ones could be
73          * extended instead.
74          */
75         if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
76                 return false;
77         if (vma->vm_file != vmg->file)
78                 return false;
79         if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
80                 return false;
81         if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
82                 return false;
83         return true;
84 }
85
86 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
87                  struct anon_vma *anon_vma2, struct vm_area_struct *vma)
88 {
89         /*
90          * The list_is_singular() test avoids merging VMAs cloned from a parent
91          * process; this improves scalability by reducing anon_vma lock contention.
92          */
93         if ((!anon_vma1 || !anon_vma2) && (!vma ||
94                 list_is_singular(&vma->anon_vma_chain)))
95                 return true;
96         return anon_vma1 == anon_vma2;
97 }
98
99 /* Are the anon_vma's belonging to each VMA compatible with one another? */
100 static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
101                                             struct vm_area_struct *vma2)
102 {
103         return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
104 }
105
106 /*
107  * init_multi_vma_prep() - Initializer for struct vma_prepare
108  * @vp: The vma_prepare struct
109  * @vma: The vma that will be altered once locked
110  * @next: The next vma if it is to be adjusted
111  * @remove: The first vma to be removed
112  * @remove2: The second vma to be removed
113  */
114 static void init_multi_vma_prep(struct vma_prepare *vp,
115                                 struct vm_area_struct *vma,
116                                 struct vm_area_struct *next,
117                                 struct vm_area_struct *remove,
118                                 struct vm_area_struct *remove2)
119 {
120         memset(vp, 0, sizeof(struct vma_prepare));
121         vp->vma = vma;
122         vp->anon_vma = vma->anon_vma;
123         vp->remove = remove;
124         vp->remove2 = remove2;
125         vp->adj_next = next;
126         if (!vp->anon_vma && next)
127                 vp->anon_vma = next->anon_vma;
128
129         vp->file = vma->vm_file;
130         if (vp->file)
131                 vp->mapping = vma->vm_file->f_mapping;
132
133 }
134
135 /*
136  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
137  * in front of (at a lower virtual address and file offset than) the vma.
138  *
139  * We cannot merge two vmas if they have differently assigned (non-NULL)
140  * anon_vmas, nor if the same anon_vma is assigned but the offsets are incompatible.
141  *
142  * We don't check here for the merged mmap wrapping around the end of pagecache
143  * indices (16TB on ia32) because do_mmap() does not permit mmap's which
144  * wrap, nor mmaps which cover the final page at index -1UL.
145  *
146  * We assume the vma may be removed as part of the merge.
147  */
148 static bool can_vma_merge_before(struct vma_merge_struct *vmg)
149 {
150         pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
151
152         if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
153             is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
154                 if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
155                         return true;
156         }
157
158         return false;
159 }
160
161 /*
162  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
163  * beyond (at a higher virtual address and file offset than) the vma.
164  *
165  * We cannot merge two vmas if they have differently assigned (non-NULL)
166  * anon_vmas, nor if the same anon_vma is assigned but the offsets are incompatible.
167  *
168  * We assume that vma is not removed as part of the merge.
169  */
170 static bool can_vma_merge_after(struct vma_merge_struct *vmg)
171 {
172         if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
173             is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
174                 if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
175                         return true;
176         }
177         return false;
178 }
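
/*
 * Worked example (illustrative): the pgoff checks above additionally require
 * the merged mapping to be contiguous in the backing object, not just in
 * virtual address space.  If prev maps file pages [0, 4) (vm_pgoff == 0,
 * vma_pages(prev) == 4), a proposed range can only merge after prev if its
 * pgoff is 4; similarly, a proposed range of pglen pages at pgoff 4 can only
 * merge before a next VMA whose vm_pgoff is 4 + pglen.
 */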
179
180 static void __vma_link_file(struct vm_area_struct *vma,
181                             struct address_space *mapping)
182 {
183         if (vma_is_shared_maywrite(vma))
184                 mapping_allow_writable(mapping);
185
186         flush_dcache_mmap_lock(mapping);
187         vma_interval_tree_insert(vma, &mapping->i_mmap);
188         flush_dcache_mmap_unlock(mapping);
189 }
190
191 /*
192  * Requires inode->i_mapping->i_mmap_rwsem
193  */
194 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
195                                       struct address_space *mapping)
196 {
197         if (vma_is_shared_maywrite(vma))
198                 mapping_unmap_writable(mapping);
199
200         flush_dcache_mmap_lock(mapping);
201         vma_interval_tree_remove(vma, &mapping->i_mmap);
202         flush_dcache_mmap_unlock(mapping);
203 }
204
205 /*
206  * vma has some anon_vma assigned, and is already inserted on that
207  * anon_vma's interval trees.
208  *
209  * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
210  * vma must be removed from the anon_vma's interval trees using
211  * anon_vma_interval_tree_pre_update_vma().
212  *
213  * After the update, the vma will be reinserted using
214  * anon_vma_interval_tree_post_update_vma().
215  *
216  * The entire update must be protected by exclusive mmap_lock and by
217  * the root anon_vma's mutex.
218  */
219 static void
220 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
221 {
222         struct anon_vma_chain *avc;
223
224         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
225                 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
226 }
227
228 static void
229 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
230 {
231         struct anon_vma_chain *avc;
232
233         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
234                 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
235 }
236
237 /*
238  * vma_prepare() - Helper function for locking VMAs prior to altering them
239  * @vp: The initialized vma_prepare struct
240  */
241 static void vma_prepare(struct vma_prepare *vp)
242 {
243         if (vp->file) {
244                 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
245
246                 if (vp->adj_next)
247                         uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
248                                       vp->adj_next->vm_end);
249
250                 i_mmap_lock_write(vp->mapping);
251                 if (vp->insert && vp->insert->vm_file) {
252                         /*
253                          * Put into interval tree now, so instantiated pages
254                          * are visible to arm/parisc __flush_dcache_page
255                          * throughout; but we cannot insert into address
256                          * space until vma start or end is updated.
257                          */
258                         __vma_link_file(vp->insert,
259                                         vp->insert->vm_file->f_mapping);
260                 }
261         }
262
263         if (vp->anon_vma) {
264                 anon_vma_lock_write(vp->anon_vma);
265                 anon_vma_interval_tree_pre_update_vma(vp->vma);
266                 if (vp->adj_next)
267                         anon_vma_interval_tree_pre_update_vma(vp->adj_next);
268         }
269
270         if (vp->file) {
271                 flush_dcache_mmap_lock(vp->mapping);
272                 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
273                 if (vp->adj_next)
274                         vma_interval_tree_remove(vp->adj_next,
275                                                  &vp->mapping->i_mmap);
276         }
277
278 }
279
280 /*
281  * vma_complete() - Helper function for unlocking after altering VMAs, or for
282  * inserting a VMA.
283  *
284  * @vp: The vma_prepare struct
285  * @vmi: The vma iterator
286  * @mm: The mm_struct
287  */
288 static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
289                          struct mm_struct *mm)
290 {
291         if (vp->file) {
292                 if (vp->adj_next)
293                         vma_interval_tree_insert(vp->adj_next,
294                                                  &vp->mapping->i_mmap);
295                 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
296                 flush_dcache_mmap_unlock(vp->mapping);
297         }
298
299         if (vp->remove && vp->file) {
300                 __remove_shared_vm_struct(vp->remove, vp->mapping);
301                 if (vp->remove2)
302                         __remove_shared_vm_struct(vp->remove2, vp->mapping);
303         } else if (vp->insert) {
304                 /*
305                  * split_vma has split insert from vma, and needs
306                  * us to insert it before dropping the locks
307                  * (it may either follow vma or precede it).
308                  */
309                 vma_iter_store(vmi, vp->insert);
310                 mm->map_count++;
311         }
312
313         if (vp->anon_vma) {
314                 anon_vma_interval_tree_post_update_vma(vp->vma);
315                 if (vp->adj_next)
316                         anon_vma_interval_tree_post_update_vma(vp->adj_next);
317                 anon_vma_unlock_write(vp->anon_vma);
318         }
319
320         if (vp->file) {
321                 i_mmap_unlock_write(vp->mapping);
322                 uprobe_mmap(vp->vma);
323
324                 if (vp->adj_next)
325                         uprobe_mmap(vp->adj_next);
326         }
327
328         if (vp->remove) {
329 again:
330                 vma_mark_detached(vp->remove, true);
331                 if (vp->file) {
332                         uprobe_munmap(vp->remove, vp->remove->vm_start,
333                                       vp->remove->vm_end);
334                         fput(vp->file);
335                 }
336                 if (vp->remove->anon_vma)
337                         anon_vma_merge(vp->vma, vp->remove);
338                 mm->map_count--;
339                 mpol_put(vma_policy(vp->remove));
340                 if (!vp->remove2)
341                         WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
342                 vm_area_free(vp->remove);
343
344                 /*
345                  * In mprotect's case 6 (see comments on vma_merge),
346                  * we are removing both mid and next vmas
347                  */
348                 if (vp->remove2) {
349                         vp->remove = vp->remove2;
350                         vp->remove2 = NULL;
351                         goto again;
352                 }
353         }
354         if (vp->insert && vp->file)
355                 uprobe_mmap(vp->insert);
356 }
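
/*
 * Illustrative sketch (not part of this file): the prepare/complete pair above
 * brackets every change to a VMA's range that is visible to rmap.  Modeled on
 * vma_shrink() further down in this file, a caller that trims a VMA roughly
 * does the following (error handling and iterator setup abbreviated):
 *
 *      vma_iter_config(vmi, end, vma->vm_end);         // range being removed
 *      if (vma_iter_prealloc(vmi, NULL))
 *              return -ENOMEM;
 *
 *      vma_start_write(vma);
 *      init_vma_prep(&vp, vma);
 *      vma_prepare(&vp);           // lock file/anon_vma, detach from interval trees
 *      vma_adjust_trans_huge(vma, start, end, 0);
 *      vma_iter_clear(vmi);        // drop the removed range from the maple tree
 *      vma_set_range(vma, start, end, pgoff);
 *      vma_complete(&vp, vmi, vma->vm_mm); // reinsert, unlock, handle removals
 *
 * Preallocating the iterator up front means the maple tree updates performed
 * between prepare and complete cannot fail part-way through.
 */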
357
358 /*
359  * init_vma_prep() - Initializer wrapper for vma_prepare struct
360  * @vp: The vma_prepare struct
361  * @vma: The vma that will be altered once locked
362  */
363 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
364 {
365         init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
366 }
367
368 /*
369  * Can the proposed VMA be merged with the left (previous) VMA taking into
370  * account the start position of the proposed range.
371  */
372 static bool can_vma_merge_left(struct vma_merge_struct *vmg)
373
374 {
375         return vmg->prev && vmg->prev->vm_end == vmg->start &&
376                 can_vma_merge_after(vmg);
377 }
378
379 /*
380  * Can the proposed VMA be merged with the right (next) VMA taking into
381  * account the end position of the proposed range.
382  *
383  * In addition, if we can merge with the left VMA, ensure that left and right
384  * anon_vma's are also compatible.
385  */
386 static bool can_vma_merge_right(struct vma_merge_struct *vmg,
387                                 bool can_merge_left)
388 {
389         if (!vmg->next || vmg->end != vmg->next->vm_start ||
390             !can_vma_merge_before(vmg))
391                 return false;
392
393         if (!can_merge_left)
394                 return true;
395
396         /*
397          * If we can merge with prev (left) and next (right), indicating that
398          * each VMA's anon_vma is compatible with the proposed anon_vma, this
399          * does not mean prev and next are compatible with EACH OTHER.
400          *
401          * We therefore check this in addition to mergeability to either side.
402          */
403         return are_anon_vmas_compatible(vmg->prev, vmg->next);
404 }
405
406 /*
407  * Close a vm structure and free it.
408  */
409 void remove_vma(struct vm_area_struct *vma, bool unreachable)
410 {
411         might_sleep();
412         vma_close(vma);
413         if (vma->vm_file)
414                 fput(vma->vm_file);
415         mpol_put(vma_policy(vma));
416         if (unreachable)
417                 __vm_area_free(vma);
418         else
419                 vm_area_free(vma);
420 }
421
422 /*
423  * Get rid of page table information in the indicated region.
424  *
425  * Called with the mm semaphore held.
426  */
427 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
428                 struct vm_area_struct *prev, struct vm_area_struct *next)
429 {
430         struct mm_struct *mm = vma->vm_mm;
431         struct mmu_gather tlb;
432
433         tlb_gather_mmu(&tlb, mm);
434         update_hiwater_rss(mm);
435         unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
436                    /* mm_wr_locked = */ true);
437         mas_set(mas, vma->vm_end);
438         free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
439                       next ? next->vm_start : USER_PGTABLES_CEILING,
440                       /* mm_wr_locked = */ true);
441         tlb_finish_mmu(&tlb);
442 }
443
444 /*
445  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where the
446  * limit has already been checked, or where failing would make no sense.
447  * VMA Iterator will point to the original VMA.
448  */
449 static __must_check int
450 __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
451             unsigned long addr, int new_below)
452 {
453         struct vma_prepare vp;
454         struct vm_area_struct *new;
455         int err;
456
457         WARN_ON(vma->vm_start >= addr);
458         WARN_ON(vma->vm_end <= addr);
459
460         if (vma->vm_ops && vma->vm_ops->may_split) {
461                 err = vma->vm_ops->may_split(vma, addr);
462                 if (err)
463                         return err;
464         }
465
466         new = vm_area_dup(vma);
467         if (!new)
468                 return -ENOMEM;
469
470         if (new_below) {
471                 new->vm_end = addr;
472         } else {
473                 new->vm_start = addr;
474                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
475         }
476
477         err = -ENOMEM;
478         vma_iter_config(vmi, new->vm_start, new->vm_end);
479         if (vma_iter_prealloc(vmi, new))
480                 goto out_free_vma;
481
482         err = vma_dup_policy(vma, new);
483         if (err)
484                 goto out_free_vmi;
485
486         err = anon_vma_clone(new, vma);
487         if (err)
488                 goto out_free_mpol;
489
490         if (new->vm_file)
491                 get_file(new->vm_file);
492
493         if (new->vm_ops && new->vm_ops->open)
494                 new->vm_ops->open(new);
495
496         vma_start_write(vma);
497         vma_start_write(new);
498
499         init_vma_prep(&vp, vma);
500         vp.insert = new;
501         vma_prepare(&vp);
502         vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
503
504         if (new_below) {
505                 vma->vm_start = addr;
506                 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
507         } else {
508                 vma->vm_end = addr;
509         }
510
511         /* vma_complete stores the new vma */
512         vma_complete(&vp, vmi, vma->vm_mm);
513         validate_mm(vma->vm_mm);
514
515         /* Success. */
516         if (new_below)
517                 vma_next(vmi);
518         else
519                 vma_prev(vmi);
520
521         return 0;
522
523 out_free_mpol:
524         mpol_put(vma_policy(new));
525 out_free_vmi:
526         vma_iter_free(vmi);
527 out_free_vma:
528         vm_area_free(new);
529         return err;
530 }
531
532 /*
533  * Split a vma into two pieces at address 'addr'; a new vma is allocated for
534  * either the first part or the tail.
535  */
536 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
537                      unsigned long addr, int new_below)
538 {
539         if (vma->vm_mm->map_count >= sysctl_max_map_count)
540                 return -ENOMEM;
541
542         return __split_vma(vmi, vma, addr, new_below);
543 }
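
/*
 * Illustrative sketch (not part of this file): clamping modifications to
 * [start, end) by splitting off the untouched portions of a VMA, exactly as
 * vma_modify() does further down in this file:
 *
 *      if (vma->vm_start < start) {
 *              err = split_vma(vmi, vma, start, 1);    // new VMA keeps the part below start
 *              if (err)
 *                      return err;
 *      }
 *
 *      if (vma->vm_end > end) {
 *              err = split_vma(vmi, vma, end, 0);      // new VMA keeps the part above end
 *              if (err)
 *                      return err;
 *      }
 */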
544
545 /*
546  * dup_anon_vma() - Helper function to duplicate anon_vma
547  * @dst: The destination VMA
548  * @src: The source VMA
549  * @dup: Set to the destination VMA if an anon_vma was duplicated.
550  *
551  * Returns: 0 on success, or -ENOMEM if duplicating the anon_vma fails.
552  */
553 static int dup_anon_vma(struct vm_area_struct *dst,
554                         struct vm_area_struct *src, struct vm_area_struct **dup)
555 {
556         /*
557          * Easily overlooked: when mprotect shifts the boundary, make sure the
558          * expanding vma has anon_vma set if the shrinking vma had, to cover any
559          * anon pages imported.
560          */
561         if (src->anon_vma && !dst->anon_vma) {
562                 int ret;
563
564                 vma_assert_write_locked(dst);
565                 dst->anon_vma = src->anon_vma;
566                 ret = anon_vma_clone(dst, src);
567                 if (ret)
568                         return ret;
569
570                 *dup = dst;
571         }
572
573         return 0;
574 }
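
/*
 * Illustrative note: dup_anon_vma() is used by the merge paths below when an
 * expanding VMA absorbs pages from a shrinking one.  If a later step of the
 * merge fails, callers undo the duplication via unlink_anon_vmas(anon_dup);
 * see vma_merge_existing_range() and vma_expand() below for the pattern.
 */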
575
576 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
577 void validate_mm(struct mm_struct *mm)
578 {
579         int bug = 0;
580         int i = 0;
581         struct vm_area_struct *vma;
582         VMA_ITERATOR(vmi, mm, 0);
583
584         mt_validate(&mm->mm_mt);
585         for_each_vma(vmi, vma) {
586 #ifdef CONFIG_DEBUG_VM_RB
587                 struct anon_vma *anon_vma = vma->anon_vma;
588                 struct anon_vma_chain *avc;
589 #endif
590                 unsigned long vmi_start, vmi_end;
591                 bool warn = false;
592
593                 vmi_start = vma_iter_addr(&vmi);
594                 vmi_end = vma_iter_end(&vmi);
595                 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
596                         warn = true;
597
598                 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
599                         warn = true;
600
601                 if (warn) {
602                         pr_emerg("issue in %s\n", current->comm);
603                         dump_stack();
604                         dump_vma(vma);
605                         pr_emerg("tree range: %px start %lx end %lx\n", vma,
606                                  vmi_start, vmi_end - 1);
607                         vma_iter_dump_tree(&vmi);
608                 }
609
610 #ifdef CONFIG_DEBUG_VM_RB
611                 if (anon_vma) {
612                         anon_vma_lock_read(anon_vma);
613                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
614                                 anon_vma_interval_tree_verify(avc);
615                         anon_vma_unlock_read(anon_vma);
616                 }
617 #endif
618                 /* Check for an infinite loop */
619                 if (++i > mm->map_count + 10) {
620                         i = -1;
621                         break;
622                 }
623         }
624         if (i != mm->map_count) {
625                 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
626                 bug = 1;
627         }
628         VM_BUG_ON_MM(bug, mm);
629 }
630 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
631
632 /* Actually perform the VMA merge operation. */
633 static int commit_merge(struct vma_merge_struct *vmg,
634                         struct vm_area_struct *adjust,
635                         struct vm_area_struct *remove,
636                         struct vm_area_struct *remove2,
637                         long adj_start,
638                         bool expanded)
639 {
640         struct vma_prepare vp;
641
642         init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);
643
644         VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
645                    vp.anon_vma != adjust->anon_vma);
646
647         if (expanded) {
648                 /* Note: vma iterator must be pointing to 'start'. */
649                 vma_iter_config(vmg->vmi, vmg->start, vmg->end);
650         } else {
651                 vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
652                                 adjust->vm_end);
653         }
654
655         if (vma_iter_prealloc(vmg->vmi, vmg->vma))
656                 return -ENOMEM;
657
658         vma_prepare(&vp);
659         vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
660         vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
661
662         if (expanded)
663                 vma_iter_store(vmg->vmi, vmg->vma);
664
665         if (adj_start) {
666                 adjust->vm_start += adj_start;
667                 adjust->vm_pgoff += PHYS_PFN(adj_start);
668                 if (adj_start < 0) {
669                         WARN_ON(expanded);
670                         vma_iter_store(vmg->vmi, adjust);
671                 }
672         }
673
674         vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);
675
676         return 0;
677 }
678
679 /* We can only remove VMAs when merging if they do not have a close hook. */
680 static bool can_merge_remove_vma(struct vm_area_struct *vma)
681 {
682         return !vma->vm_ops || !vma->vm_ops->close;
683 }
684
685 /*
686  * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
687  * attributes modified.
688  *
689  * @vmg: Describes the modifications being made to a VMA and associated
690  *       metadata.
691  *
692  * When the attributes of a range within a VMA change, it might be possible
693  * for immediately adjacent VMAs to be merged into that VMA due to having
694  * identical properties.
695  *
696  * This function checks for the existence of any such mergeable VMAs and updates
697  * the maple tree describing the @vmg->vma->vm_mm address space to account for
698  * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
699  *
700  * As part of this operation, if a merge occurs, the @vmg object will have its
701  * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
702  * calls to this function should reset these fields.
703  *
704  * Returns: The merged VMA if merge succeeds, or NULL otherwise.
705  *
706  * ASSUMPTIONS:
707  * - The caller must assign the VMA to be modified to @vmg->vma.
708  * - The caller must have set @vmg->prev to the previous VMA, if there is one.
709  * - The caller must not set @vmg->next, as we determine this.
710  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
711  * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
712  */
713 static __must_check struct vm_area_struct *vma_merge_existing_range(
714                 struct vma_merge_struct *vmg)
715 {
716         struct vm_area_struct *vma = vmg->vma;
717         struct vm_area_struct *prev = vmg->prev;
718         struct vm_area_struct *next, *res;
719         struct vm_area_struct *anon_dup = NULL;
720         struct vm_area_struct *adjust = NULL;
721         unsigned long start = vmg->start;
722         unsigned long end = vmg->end;
723         bool left_side = vma && start == vma->vm_start;
724         bool right_side = vma && end == vma->vm_end;
725         int err = 0;
726         long adj_start = 0;
727         bool merge_will_delete_vma, merge_will_delete_next;
728         bool merge_left, merge_right, merge_both;
729         bool expanded;
730
731         mmap_assert_write_locked(vmg->mm);
732         VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */
733         VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
734         VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
735         VM_WARN_ON_VMG(start >= end, vmg);
736
737         /*
738          * If vma == prev, then we are offset into a VMA. Otherwise, the range
739          * must start at the VMA's start and span only a portion of it.
740          */
741         VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) ||
742                                vmg->end > vma->vm_end), vmg);
743         /* The vmi must be positioned within vmg->vma. */
744         VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
745                                 vma_iter_addr(vmg->vmi) < vma->vm_end), vmg);
746
747         vmg->state = VMA_MERGE_NOMERGE;
748
749         /*
750          * If this is a special mapping, or if the range being modified is neither
751          * at the leftmost nor the rightmost edge of the VMA, then we have no
752          * chance of merging and should abort.
753          */
754         if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
755                 return NULL;
756
757         if (left_side)
758                 merge_left = can_vma_merge_left(vmg);
759         else
760                 merge_left = false;
761
762         if (right_side) {
763                 next = vmg->next = vma_iter_next_range(vmg->vmi);
764                 vma_iter_prev_range(vmg->vmi);
765
766                 merge_right = can_vma_merge_right(vmg, merge_left);
767         } else {
768                 merge_right = false;
769                 next = NULL;
770         }
771
772         if (merge_left)         /* If merging prev, position iterator there. */
773                 vma_prev(vmg->vmi);
774         else if (!merge_right)  /* If we have nothing to merge, abort. */
775                 return NULL;
776
777         merge_both = merge_left && merge_right;
778         /* If we span the entire VMA, a merge implies it will be deleted. */
779         merge_will_delete_vma = left_side && right_side;
780
781         /*
782          * If we need to remove vma in its entirety but are unable to do so,
783          * we have no sensible recourse but to abort the merge.
784          */
785         if (merge_will_delete_vma && !can_merge_remove_vma(vma))
786                 return NULL;
787
788         /*
789          * If we merge both VMAs, then next is also deleted. This implies
790          * merge_will_delete_vma also.
791          */
792         merge_will_delete_next = merge_both;
793
794         /*
795          * If we cannot delete next, then we can reduce the operation to merging
796          * prev and vma (thereby deleting vma).
797          */
798         if (merge_will_delete_next && !can_merge_remove_vma(next)) {
799                 merge_will_delete_next = false;
800                 merge_right = false;
801                 merge_both = false;
802         }
803
804         /* No matter what happens, we will be adjusting vma. */
805         vma_start_write(vma);
806
807         if (merge_left)
808                 vma_start_write(prev);
809
810         if (merge_right)
811                 vma_start_write(next);
812
813         if (merge_both) {
814                 /*
815                  *         |<----->|
816                  * |-------*********-------|
817                  *   prev     vma     next
818                  *  extend   delete  delete
819                  */
820
821                 vmg->vma = prev;
822                 vmg->start = prev->vm_start;
823                 vmg->end = next->vm_end;
824                 vmg->pgoff = prev->vm_pgoff;
825
826                 /*
827                  * We already ensured anon_vma compatibility above, so if prev has
828                  * no anon_vma object, it is simply a question of which of next or
829                  * vma contains the anon_vma we must duplicate.
830                  */
831                 err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
832         } else if (merge_left) {
833                 /*
834                  *         |<----->| OR
835                  *         |<--------->|
836                  * |-------*************
837                  *   prev       vma
838                  *  extend shrink/delete
839                  */
840
841                 vmg->vma = prev;
842                 vmg->start = prev->vm_start;
843                 vmg->pgoff = prev->vm_pgoff;
844
845                 if (!merge_will_delete_vma) {
846                         adjust = vma;
847                         adj_start = vmg->end - vma->vm_start;
848                 }
849
850                 err = dup_anon_vma(prev, vma, &anon_dup);
851         } else { /* merge_right */
852                 /*
853                  *     |<----->| OR
854                  * |<--------->|
855                  * *************-------|
856                  *      vma       next
857                  * shrink/delete extend
858                  */
859
860                 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
861
862                 VM_WARN_ON_VMG(!merge_right, vmg);
863                 /* If we are offset into a VMA, then prev must be vma. */
864                 VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg);
865
866                 if (merge_will_delete_vma) {
867                         vmg->vma = next;
868                         vmg->end = next->vm_end;
869                         vmg->pgoff = next->vm_pgoff - pglen;
870                 } else {
871                         /*
872                          * We shrink vma and expand next.
873                          *
874                          * IMPORTANT: This is the ONLY case where the final
875                          * merged VMA is NOT vmg->vma, but rather vmg->next.
876                          */
877
878                         vmg->start = vma->vm_start;
879                         vmg->end = start;
880                         vmg->pgoff = vma->vm_pgoff;
881
882                         adjust = next;
883                         adj_start = -(vma->vm_end - start);
884                 }
885
886                 err = dup_anon_vma(next, vma, &anon_dup);
887         }
888
889         if (err)
890                 goto abort;
891
892         /*
893          * In nearly all cases, we expand vmg->vma. There is one exception -
894          * merge_right where we partially span the VMA. In this case we shrink
895          * the end of vmg->vma and adjust the start of vmg->next accordingly.
896          */
897         expanded = !merge_right || merge_will_delete_vma;
898
899         if (commit_merge(vmg, adjust,
900                          merge_will_delete_vma ? vma : NULL,
901                          merge_will_delete_next ? next : NULL,
902                          adj_start, expanded)) {
903                 if (anon_dup)
904                         unlink_anon_vmas(anon_dup);
905
906                 vmg->state = VMA_MERGE_ERROR_NOMEM;
907                 return NULL;
908         }
909
910         res = merge_left ? prev : next;
911         khugepaged_enter_vma(res, vmg->flags);
912
913         vmg->state = VMA_MERGE_SUCCESS;
914         return res;
915
916 abort:
917         vma_iter_set(vmg->vmi, start);
918         vma_iter_load(vmg->vmi);
919         vmg->state = VMA_MERGE_ERROR_NOMEM;
920         return NULL;
921 }
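
/*
 * Worked example (illustrative): suppose prev spans [0x1000, 0x3000), vma
 * spans [0x3000, 0x5000) and next spans [0x5000, 0x8000), all with compatible
 * attributes and contiguous pgoffs, and the whole of vma is being modified.
 * The merge_both case above rewrites vmg to describe the result:
 *
 *      vmg->vma   = prev;
 *      vmg->start = prev->vm_start;    // 0x1000
 *      vmg->end   = next->vm_end;      // 0x8000
 *      vmg->pgoff = prev->vm_pgoff;
 *
 * and commit_merge() then deletes both vma and next, leaving a single VMA
 * covering [0x1000, 0x8000).
 */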
922
923 /*
924  * vma_merge_new_range - Attempt to merge a new VMA into address space
925  *
926  * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
927  *       (exclusive), which we try to merge with any adjacent VMAs if possible.
928  *
929  * We are about to add a VMA to the address space starting at @vmg->start and
930  * ending at @vmg->end. There are three different possible scenarios:
931  *
932  * 1. There is a VMA with identical properties immediately adjacent to the
933  *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
934  *    EXPAND that VMA:
935  *
936  * Proposed:       |-----|  or  |-----|
937  * Existing:  |----|                  |----|
938  *
939  * 2. There are VMAs with identical properties immediately adjacent to the
940  *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
941  *    EXPAND the former and REMOVE the latter:
942  *
943  * Proposed:       |-----|
944  * Existing:  |----|     |----|
945  *
946  * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
947  *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
948  *
949  * In instances where we can merge, this function returns the expanded VMA, which
950  * will have its range adjusted accordingly and the underlying maple tree
951  * updated as well.
952  *
953  * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
954  *          to the VMA we expanded.
955  *
956  * This function adjusts @vmg to provide @vmg->next if not already specified,
957  * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
958  *
959  * ASSUMPTIONS:
960  * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
961  * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
962  *   other than VMAs that will be unmapped should the operation succeed.
963  * - The caller must have specified the previous vma in @vmg->prev.
964  * - The caller must have specified the next vma in @vmg->next.
965  * - The caller must have positioned the vmi at or before the gap.
966  */
967 struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
968 {
969         struct vm_area_struct *prev = vmg->prev;
970         struct vm_area_struct *next = vmg->next;
971         unsigned long end = vmg->end;
972         bool can_merge_left, can_merge_right;
973         bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;
974
975         mmap_assert_write_locked(vmg->mm);
976         VM_WARN_ON_VMG(vmg->vma, vmg);
977         /* vmi must point at or before the gap. */
978         VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);
979
980         vmg->state = VMA_MERGE_NOMERGE;
981
982         /* Special VMAs are unmergeable, also if no prev/next. */
983         if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
984                 return NULL;
985
986         can_merge_left = can_vma_merge_left(vmg);
987         can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left);
988
989         /* If we can merge with the next VMA, adjust vmg accordingly. */
990         if (can_merge_right) {
991                 vmg->end = next->vm_end;
992                 vmg->vma = next;
993         }
994
995         /* If we can merge with the previous VMA, adjust vmg accordingly. */
996         if (can_merge_left) {
997                 vmg->start = prev->vm_start;
998                 vmg->vma = prev;
999                 vmg->pgoff = prev->vm_pgoff;
1000
1001                 /*
1002                  * If this merge would result in removal of the next VMA but we
1003                  * are not permitted to do so, reduce the operation to merging
1004                  * prev and vma.
1005                  */
1006                 if (can_merge_right && !can_merge_remove_vma(next))
1007                         vmg->end = end;
1008
1009                 /* In expand-only case we are already positioned at prev. */
1010                 if (!just_expand) {
1011                         /* Equivalent to going to the previous range. */
1012                         vma_prev(vmg->vmi);
1013                 }
1014         }
1015
1016         /*
1017          * Now try to expand adjacent VMA(s). This takes care of removing the
1018          * following VMA if we have VMAs on both sides.
1019          */
1020         if (vmg->vma && !vma_expand(vmg)) {
1021                 khugepaged_enter_vma(vmg->vma, vmg->flags);
1022                 vmg->state = VMA_MERGE_SUCCESS;
1023                 return vmg->vma;
1024         }
1025
1026         return NULL;
1027 }
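
/*
 * Worked example (illustrative): with existing, compatible VMAs at
 * [0x1000, 0x2000) and [0x3000, 0x4000), proposing a new range
 * [0x2000, 0x3000) hits scenario 2 above: the left VMA is expanded over the
 * gap and the right VMA is removed, so vmg->start/vmg->end come back as
 * 0x1000/0x4000 and the returned VMA is the (expanded) previous one.
 */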
1028
1029 /*
1030  * vma_expand - Expand an existing VMA
1031  *
1032  * @vmg: Describes a VMA expansion operation.
1033  *
1034  * Expand @vma to vmg->start and vmg->end.  Can expand off the start and end.
1035  * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
1036  * vmg->next->vm_end.  Checking if the vmg->vma can expand and merge with
1037  * vmg->next needs to be handled by the caller.
1038  *
1039  * Returns: 0 on success.
1040  *
1041  * ASSUMPTIONS:
1042  * - The caller must hold a WRITE lock on vmg->vma->vm_mm->mmap_lock.
1043  * - The caller must have set @vmg->vma and @vmg->next.
1044  */
1045 int vma_expand(struct vma_merge_struct *vmg)
1046 {
1047         struct vm_area_struct *anon_dup = NULL;
1048         bool remove_next = false;
1049         struct vm_area_struct *vma = vmg->vma;
1050         struct vm_area_struct *next = vmg->next;
1051
1052         mmap_assert_write_locked(vmg->mm);
1053
1054         vma_start_write(vma);
1055         if (next && (vma != next) && (vmg->end == next->vm_end)) {
1056                 int ret;
1057
1058                 remove_next = true;
1059                 /* This should already have been checked by this point. */
1060                 VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
1061                 vma_start_write(next);
1062                 ret = dup_anon_vma(vma, next, &anon_dup);
1063                 if (ret)
1064                         return ret;
1065         }
1066
1067         /* Not merging but overwriting any part of next is not handled. */
1068         VM_WARN_ON_VMG(next && !remove_next &&
1069                        next != vma && vmg->end > next->vm_start, vmg);
1070         /* Only handles expanding */
1071         VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg);
1072
1073         if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
1074                 goto nomem;
1075
1076         return 0;
1077
1078 nomem:
1079         vmg->state = VMA_MERGE_ERROR_NOMEM;
1080         if (anon_dup)
1081                 unlink_anon_vmas(anon_dup);
1082         return -ENOMEM;
1083 }
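
/*
 * Illustrative sketch (not part of this file): growing a VMA over an empty
 * gap that immediately follows it.  Locals are hypothetical; iterator
 * positioning and the remaining vmg fields are assumed to be set up as the
 * callers in this file do.
 *
 *      vmg->vma   = vma;               // VMA to grow
 *      vmg->next  = NULL;              // nothing to merge over
 *      vmg->start = vma->vm_start;
 *      vmg->end   = new_end;           // new, larger end address
 *      vmg->pgoff = vma->vm_pgoff;
 *
 *      err = vma_expand(vmg);
 *      if (err)
 *              return err;
 */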
1084
1085 /*
1086  * vma_shrink() - Reduce an existing VMA's memory area
1087  * @vmi: The vma iterator
1088  * @vma: The VMA to modify
1089  * @start: The new start
1090  * @end: The new end
 * @pgoff: The new page offset of the VMA
1091  *
1092  * Returns: 0 on success, -ENOMEM otherwise
1093  */
1094 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
1095                unsigned long start, unsigned long end, pgoff_t pgoff)
1096 {
1097         struct vma_prepare vp;
1098
1099         WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
1100
1101         if (vma->vm_start < start)
1102                 vma_iter_config(vmi, vma->vm_start, start);
1103         else
1104                 vma_iter_config(vmi, end, vma->vm_end);
1105
1106         if (vma_iter_prealloc(vmi, NULL))
1107                 return -ENOMEM;
1108
1109         vma_start_write(vma);
1110
1111         init_vma_prep(&vp, vma);
1112         vma_prepare(&vp);
1113         vma_adjust_trans_huge(vma, start, end, 0);
1114
1115         vma_iter_clear(vmi);
1116         vma_set_range(vma, start, end, pgoff);
1117         vma_complete(&vp, vmi, vma->vm_mm);
1118         validate_mm(vma->vm_mm);
1119         return 0;
1120 }
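
/*
 * Worked example (illustrative): when trimming pages off the front of a VMA,
 * the caller must advance pgoff by the number of pages removed, mirroring the
 * arithmetic in __split_vma() above:
 *
 *      pgoff_t pgoff = vma->vm_pgoff +
 *                      ((new_start - vma->vm_start) >> PAGE_SHIFT);
 *
 *      err = vma_shrink(vmi, vma, new_start, vma->vm_end, pgoff);
 *
 * Trimming from the end leaves vm_pgoff unchanged, so vma->vm_pgoff is passed
 * through as-is.
 */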
1121
1122 static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
1123                     struct ma_state *mas_detach, bool mm_wr_locked)
1124 {
1125         struct mmu_gather tlb;
1126
1127         if (!vms->clear_ptes) /* Nothing to do */
1128                 return;
1129
1130         /*
1131          * We can free page tables without write-locking mmap_lock because VMAs
1132          * were isolated before we downgraded mmap_lock.
1133          */
1134         mas_set(mas_detach, 1);
1135         tlb_gather_mmu(&tlb, vms->vma->vm_mm);
1136         update_hiwater_rss(vms->vma->vm_mm);
1137         unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
1138                    vms->vma_count, mm_wr_locked);
1139
1140         mas_set(mas_detach, 1);
1141         /* start and end may be different if there is no prev or next vma. */
1142         free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
1143                       vms->unmap_end, mm_wr_locked);
1144         tlb_finish_mmu(&tlb);
1145         vms->clear_ptes = false;
1146 }
1147
1148 static void vms_clean_up_area(struct vma_munmap_struct *vms,
1149                 struct ma_state *mas_detach)
1150 {
1151         struct vm_area_struct *vma;
1152
1153         if (!vms->nr_pages)
1154                 return;
1155
1156         vms_clear_ptes(vms, mas_detach, true);
1157         mas_set(mas_detach, 0);
1158         mas_for_each(mas_detach, vma, ULONG_MAX)
1159                 vma_close(vma);
1160 }
1161
1162 /*
1163  * vms_complete_munmap_vmas() - Finish the munmap() operation
1164  * @vms: The vma munmap struct
1165  * @mas_detach: The maple state of the detached vmas
1166  *
1167  * This updates the mm_struct, unmaps the region, frees the resources
1168  * used for the munmap() and may downgrade the lock, if requested.  It performs
1169  * everything that needs to be done once the vma maple tree has been updated.
1170  */
1171 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
1172                 struct ma_state *mas_detach)
1173 {
1174         struct vm_area_struct *vma;
1175         struct mm_struct *mm;
1176
1177         mm = current->mm;
1178         mm->map_count -= vms->vma_count;
1179         mm->locked_vm -= vms->locked_vm;
1180         if (vms->unlock)
1181                 mmap_write_downgrade(mm);
1182
1183         if (!vms->nr_pages)
1184                 return;
1185
1186         vms_clear_ptes(vms, mas_detach, !vms->unlock);
1187         /* Update high watermark before we lower total_vm */
1188         update_hiwater_vm(mm);
1189         /* Stat accounting */
1190         WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
1191         /* Paranoid bookkeeping */
1192         VM_WARN_ON(vms->exec_vm > mm->exec_vm);
1193         VM_WARN_ON(vms->stack_vm > mm->stack_vm);
1194         VM_WARN_ON(vms->data_vm > mm->data_vm);
1195         mm->exec_vm -= vms->exec_vm;
1196         mm->stack_vm -= vms->stack_vm;
1197         mm->data_vm -= vms->data_vm;
1198
1199         /* Remove and clean up vmas */
1200         mas_set(mas_detach, 0);
1201         mas_for_each(mas_detach, vma, ULONG_MAX)
1202                 remove_vma(vma, /* unreachable = */ false);
1203
1204         vm_unacct_memory(vms->nr_accounted);
1205         validate_mm(mm);
1206         if (vms->unlock)
1207                 mmap_read_unlock(mm);
1208
1209         __mt_destroy(mas_detach->tree);
1210 }
1211
1212 /*
1213  * reattach_vmas() - Undo any munmap work and free resources
1214  * @mas_detach: The maple state with the detached maple tree
1215  *
1216  * Reattach any detached vmas and free up the maple tree used to track the vmas.
1217  */
1218 static void reattach_vmas(struct ma_state *mas_detach)
1219 {
1220         struct vm_area_struct *vma;
1221
1222         mas_set(mas_detach, 0);
1223         mas_for_each(mas_detach, vma, ULONG_MAX)
1224                 vma_mark_detached(vma, false);
1225
1226         __mt_destroy(mas_detach->tree);
1227 }
1228
1229 /*
1230  * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
1231  * for later removal.  Handles splitting the first and last VMAs if necessary
1232  * and marks the vmas as detached.
1233  *
1234  * @vms: The vma munmap struct
1235  * @mas_detach: The maple state tracking the detached tree
1236  *
1237  * Return: 0 on success, error otherwise
1238  */
1239 static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
1240                 struct ma_state *mas_detach)
1241 {
1242         struct vm_area_struct *next = NULL;
1243         int error;
1244
1245         /*
1246          * If we need to split any vma, do it now to save pain later.
1247          * Does it split the first one?
1248          */
1249         if (vms->start > vms->vma->vm_start) {
1250
1251                 /*
1252                  * Make sure that map_count on return from munmap() will
1253                  * not exceed its limit; but let map_count go just above
1254                  * its limit temporarily, to help free resources as expected.
1255                  */
1256                 if (vms->end < vms->vma->vm_end &&
1257                     vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
1258                         error = -ENOMEM;
1259                         goto map_count_exceeded;
1260                 }
1261
1262                 /* Don't bother splitting the VMA if we can't unmap it anyway */
1263                 if (!can_modify_vma(vms->vma)) {
1264                         error = -EPERM;
1265                         goto start_split_failed;
1266                 }
1267
1268                 error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
1269                 if (error)
1270                         goto start_split_failed;
1271         }
1272         vms->prev = vma_prev(vms->vmi);
1273         if (vms->prev)
1274                 vms->unmap_start = vms->prev->vm_end;
1275
1276         /*
1277          * Detach a range of VMAs from the mm. We use next as a temporary
1278          * variable, as it is always overwritten.
1279          */
1280         for_each_vma_range(*(vms->vmi), next, vms->end) {
1281                 long nrpages;
1282
1283                 if (!can_modify_vma(next)) {
1284                         error = -EPERM;
1285                         goto modify_vma_failed;
1286                 }
1287                 /* Does it split the end? */
1288                 if (next->vm_end > vms->end) {
1289                         error = __split_vma(vms->vmi, next, vms->end, 0);
1290                         if (error)
1291                                 goto end_split_failed;
1292                 }
1293                 vma_start_write(next);
1294                 mas_set(mas_detach, vms->vma_count++);
1295                 error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
1296                 if (error)
1297                         goto munmap_gather_failed;
1298
1299                 vma_mark_detached(next, true);
1300                 nrpages = vma_pages(next);
1301
1302                 vms->nr_pages += nrpages;
1303                 if (next->vm_flags & VM_LOCKED)
1304                         vms->locked_vm += nrpages;
1305
1306                 if (next->vm_flags & VM_ACCOUNT)
1307                         vms->nr_accounted += nrpages;
1308
1309                 if (is_exec_mapping(next->vm_flags))
1310                         vms->exec_vm += nrpages;
1311                 else if (is_stack_mapping(next->vm_flags))
1312                         vms->stack_vm += nrpages;
1313                 else if (is_data_mapping(next->vm_flags))
1314                         vms->data_vm += nrpages;
1315
1316                 if (vms->uf) {
1317                         /*
1318                          * If userfaultfd_unmap_prep returns an error the vmas
1319                          * highly unexpected error anyway. This is no
1320                          * different from the case where the first of the two
1321                          * __split_vma calls fails, but we don't undo the first
1322                          * split, even though we could. This failure is unlikely
1323                          * enough that it's not worth optimizing for.
1324                          * failure that it's not worth optimizing it for.
1325                          */
1326                         error = userfaultfd_unmap_prep(next, vms->start,
1327                                                        vms->end, vms->uf);
1328                         if (error)
1329                                 goto userfaultfd_error;
1330                 }
1331 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
1332                 BUG_ON(next->vm_start < vms->start);
1333                 BUG_ON(next->vm_start > vms->end);
1334 #endif
1335         }
1336
1337         vms->next = vma_next(vms->vmi);
1338         if (vms->next)
1339                 vms->unmap_end = vms->next->vm_start;
1340
1341 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1342         /* Make sure no VMAs are about to be lost. */
1343         {
1344                 MA_STATE(test, mas_detach->tree, 0, 0);
1345                 struct vm_area_struct *vma_mas, *vma_test;
1346                 int test_count = 0;
1347
1348                 vma_iter_set(vms->vmi, vms->start);
1349                 rcu_read_lock();
1350                 vma_test = mas_find(&test, vms->vma_count - 1);
1351                 for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
1352                         BUG_ON(vma_mas != vma_test);
1353                         test_count++;
1354                         vma_test = mas_next(&test, vms->vma_count - 1);
1355                 }
1356                 rcu_read_unlock();
1357                 BUG_ON(vms->vma_count != test_count);
1358         }
1359 #endif
1360
1361         while (vma_iter_addr(vms->vmi) > vms->start)
1362                 vma_iter_prev_range(vms->vmi);
1363
1364         vms->clear_ptes = true;
1365         return 0;
1366
1367 userfaultfd_error:
1368 munmap_gather_failed:
1369 end_split_failed:
1370 modify_vma_failed:
1371         reattach_vmas(mas_detach);
1372 start_split_failed:
1373 map_count_exceeded:
1374         return error;
1375 }
1376
1377 /*
1378  * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
1379  * @vms: The vma munmap struct
1380  * @vmi: The vma iterator
1381  * @vma: The first vm_area_struct to munmap
1382  * @start: The aligned start address to munmap
1383  * @end: The aligned end address to munmap
1384  * @uf: The userfaultfd list_head
1385  * @unlock: Unlock after the operation.  Only unlocked on success
1386  */
1387 static void init_vma_munmap(struct vma_munmap_struct *vms,
1388                 struct vma_iterator *vmi, struct vm_area_struct *vma,
1389                 unsigned long start, unsigned long end, struct list_head *uf,
1390                 bool unlock)
1391 {
1392         vms->vmi = vmi;
1393         vms->vma = vma;
1394         if (vma) {
1395                 vms->start = start;
1396                 vms->end = end;
1397         } else {
1398                 vms->start = vms->end = 0;
1399         }
1400         vms->unlock = unlock;
1401         vms->uf = uf;
1402         vms->vma_count = 0;
1403         vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
1404         vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
1405         vms->unmap_start = FIRST_USER_ADDRESS;
1406         vms->unmap_end = USER_PGTABLES_CEILING;
1407         vms->clear_ptes = false;
1408 }
1409
1410 /*
1411  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
1412  * @vmi: The vma iterator
1413  * @vma: The starting vm_area_struct
1414  * @mm: The mm_struct
1415  * @start: The aligned start address to munmap.
1416  * @end: The aligned end address to munmap.
1417  * @uf: The userfaultfd list_head
1418  * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
1419  * success.
1420  *
1421  * Return: 0 on success and drops the lock if so directed, error and leaves the
1422  * lock held otherwise.
1423  */
1424 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
1425                 struct mm_struct *mm, unsigned long start, unsigned long end,
1426                 struct list_head *uf, bool unlock)
1427 {
1428         struct maple_tree mt_detach;
1429         MA_STATE(mas_detach, &mt_detach, 0, 0);
1430         mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
1431         mt_on_stack(mt_detach);
1432         struct vma_munmap_struct vms;
1433         int error;
1434
1435         init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
1436         error = vms_gather_munmap_vmas(&vms, &mas_detach);
1437         if (error)
1438                 goto gather_failed;
1439
1440         error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
1441         if (error)
1442                 goto clear_tree_failed;
1443
1444         /* Point of no return */
1445         vms_complete_munmap_vmas(&vms, &mas_detach);
1446         return 0;
1447
1448 clear_tree_failed:
1449         reattach_vmas(&mas_detach);
1450 gather_failed:
1451         validate_mm(mm);
1452         return error;
1453 }
1454
1455 /*
1456  * do_vmi_munmap() - munmap a given range.
1457  * @vmi: The vma iterator
1458  * @mm: The mm_struct
1459  * @start: The start address to munmap
1460  * @len: The length of the range to munmap
1461  * @uf: The userfaultfd list_head
1462  * @unlock: set to true if the user wants to drop the mmap_lock on success
1463  *
1464  * This function takes a @vmi that is either pointing to the previous VMA or set
1465  * to MA_START and sets it up to remove the mapping(s).  The @len will be
1466  * aligned.
1467  *
1468  * Return: 0 on success and drops the lock if so directed, error and leaves the
1469  * lock held otherwise.
1470  */
1471 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
1472                   unsigned long start, size_t len, struct list_head *uf,
1473                   bool unlock)
1474 {
1475         unsigned long end;
1476         struct vm_area_struct *vma;
1477
1478         if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
1479                 return -EINVAL;
1480
1481         end = start + PAGE_ALIGN(len);
1482         if (end == start)
1483                 return -EINVAL;
1484
1485         /* Find the first overlapping VMA */
1486         vma = vma_find(vmi, end);
1487         if (!vma) {
1488                 if (unlock)
1489                         mmap_write_unlock(mm);
1490                 return 0;
1491         }
1492
1493         return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
1494 }
1495
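/*
 * Illustrative example (not taken from this file): a short worked case of the
 * alignment done by do_vmi_munmap() above, assuming 4KiB pages.  An unaligned
 * @start is rejected with -EINVAL, while an unaligned @len is rounded up to a
 * whole page:
 *
 *	start = 0x401000; len = 0x1800;
 *	end = start + PAGE_ALIGN(len);	// end == 0x403000
 *	// the range actually unmapped is [0x401000, 0x403000)
 */
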
1496 /*
1497  * We are about to modify one or more of a VMA's flags, policy, userfaultfd
1498  * context and anonymous VMA name within the range [start, end).
1499  *
1500  * As a result, we might be able to merge the newly modified VMA range with an
1501  * adjacent VMA with identical properties.
1502  *
1503  * If no merge is possible and the range does not span the entirety of the VMA,
1504  * we then need to split the VMA to accommodate the change.
1505  *
1506  * The function returns either the merged VMA, the original VMA if a split was
1507  * required instead, or an error if the split failed.
1508  */
1509 static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
1510 {
1511         struct vm_area_struct *vma = vmg->vma;
1512         struct vm_area_struct *merged;
1513
1514         /* First, try to merge. */
1515         merged = vma_merge_existing_range(vmg);
1516         if (merged)
1517                 return merged;
1518
1519         /* Split any preceding portion of the VMA. */
1520         if (vma->vm_start < vmg->start) {
1521                 int err = split_vma(vmg->vmi, vma, vmg->start, 1);
1522
1523                 if (err)
1524                         return ERR_PTR(err);
1525         }
1526
1527         /* Split any trailing portion of the VMA. */
1528         if (vma->vm_end > vmg->end) {
1529                 int err = split_vma(vmg->vmi, vma, vmg->end, 0);
1530
1531                 if (err)
1532                         return ERR_PTR(err);
1533         }
1534
1535         return vma;
1536 }
1537
1538 struct vm_area_struct *vma_modify_flags(
1539         struct vma_iterator *vmi, struct vm_area_struct *prev,
1540         struct vm_area_struct *vma, unsigned long start, unsigned long end,
1541         unsigned long new_flags)
1542 {
1543         VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1544
1545         vmg.flags = new_flags;
1546
1547         return vma_modify(&vmg);
1548 }
1549
1550 struct vm_area_struct
1551 *vma_modify_flags_name(struct vma_iterator *vmi,
1552                        struct vm_area_struct *prev,
1553                        struct vm_area_struct *vma,
1554                        unsigned long start,
1555                        unsigned long end,
1556                        unsigned long new_flags,
1557                        struct anon_vma_name *new_name)
1558 {
1559         VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1560
1561         vmg.flags = new_flags;
1562         vmg.anon_name = new_name;
1563
1564         return vma_modify(&vmg);
1565 }
1566
1567 struct vm_area_struct
1568 *vma_modify_policy(struct vma_iterator *vmi,
1569                    struct vm_area_struct *prev,
1570                    struct vm_area_struct *vma,
1571                    unsigned long start, unsigned long end,
1572                    struct mempolicy *new_pol)
1573 {
1574         VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1575
1576         vmg.policy = new_pol;
1577
1578         return vma_modify(&vmg);
1579 }
1580
1581 struct vm_area_struct
1582 *vma_modify_flags_uffd(struct vma_iterator *vmi,
1583                        struct vm_area_struct *prev,
1584                        struct vm_area_struct *vma,
1585                        unsigned long start, unsigned long end,
1586                        unsigned long new_flags,
1587                        struct vm_userfaultfd_ctx new_ctx)
1588 {
1589         VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1590
1591         vmg.flags = new_flags;
1592         vmg.uffd_ctx = new_ctx;
1593
1594         return vma_modify(&vmg);
1595 }
1596
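/*
 * Illustrative sketch (not taken from this file): an mprotect-style caller,
 * holding mmap_lock for write, would typically use one of the wrappers above
 * and must handle the ERR_PTR() return from a failed split; the surrounding
 * context and the vm_flags_reset() call are assumptions here:
 *
 *	vma = vma_modify_flags(&vmi, prev, vma, start, end, newflags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	vma_start_write(vma);
 *	vm_flags_reset(vma, newflags);
 */
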
1597 /*
1598  * Expand vma by delta bytes, potentially merging with an immediately adjacent
1599  * VMA with identical properties.
1600  */
1601 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1602                                         struct vm_area_struct *vma,
1603                                         unsigned long delta)
1604 {
1605         VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
1606
1607         vmg.next = vma_iter_next_rewind(vmi, NULL);
1608         vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */
1609
1610         return vma_merge_new_range(&vmg);
1611 }
1612
1613 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
1614 {
1615         vb->count = 0;
1616 }
1617
1618 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
1619 {
1620         struct address_space *mapping;
1621         int i;
1622
1623         mapping = vb->vmas[0]->vm_file->f_mapping;
1624         i_mmap_lock_write(mapping);
1625         for (i = 0; i < vb->count; i++) {
1626                 VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
1627                 __remove_shared_vm_struct(vb->vmas[i], mapping);
1628         }
1629         i_mmap_unlock_write(mapping);
1630
1631         unlink_file_vma_batch_init(vb);
1632 }
1633
1634 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
1635                                struct vm_area_struct *vma)
1636 {
1637         if (vma->vm_file == NULL)
1638                 return;
1639
1640         if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
1641             vb->count == ARRAY_SIZE(vb->vmas))
1642                 unlink_file_vma_batch_process(vb);
1643
1644         vb->vmas[vb->count] = vma;
1645         vb->count++;
1646 }
1647
1648 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
1649 {
1650         if (vb->count > 0)
1651                 unlink_file_vma_batch_process(vb);
1652 }
1653
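/*
 * Illustrative sketch (not taken from this file): the batch API above is
 * meant for teardown paths such as free_pgtables(), which unlink a run of
 * VMAs while taking each mapping's i_mmap lock once per run of same-file
 * VMAs; the VMA walk below is an assumption for illustration:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	do {
 *		unlink_file_vma_batch_add(&vb, vma);
 *		vma = vma_next(&vmi);
 *	} while (vma);
 *	unlink_file_vma_batch_final(&vb);
 */
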
1654 /*
1655  * Unlink a file-based vm structure from its interval tree, to hide
1656  * vma from rmap and vmtruncate before freeing its page tables.
1657  */
1658 void unlink_file_vma(struct vm_area_struct *vma)
1659 {
1660         struct file *file = vma->vm_file;
1661
1662         if (file) {
1663                 struct address_space *mapping = file->f_mapping;
1664
1665                 i_mmap_lock_write(mapping);
1666                 __remove_shared_vm_struct(vma, mapping);
1667                 i_mmap_unlock_write(mapping);
1668         }
1669 }
1670
1671 void vma_link_file(struct vm_area_struct *vma)
1672 {
1673         struct file *file = vma->vm_file;
1674         struct address_space *mapping;
1675
1676         if (file) {
1677                 mapping = file->f_mapping;
1678                 i_mmap_lock_write(mapping);
1679                 __vma_link_file(vma, mapping);
1680                 i_mmap_unlock_write(mapping);
1681         }
1682 }
1683
1684 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1685 {
1686         VMA_ITERATOR(vmi, mm, 0);
1687
1688         vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1689         if (vma_iter_prealloc(&vmi, vma))
1690                 return -ENOMEM;
1691
1692         vma_start_write(vma);
1693         vma_iter_store(&vmi, vma);
1694         vma_link_file(vma);
1695         mm->map_count++;
1696         validate_mm(mm);
1697         return 0;
1698 }
1699
1700 /*
1701  * Copy the vma structure to a new location in the same mm,
1702  * prior to moving page table entries, to effect an mremap move.
1703  */
1704 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1705         unsigned long addr, unsigned long len, pgoff_t pgoff,
1706         bool *need_rmap_locks)
1707 {
1708         struct vm_area_struct *vma = *vmap;
1709         unsigned long vma_start = vma->vm_start;
1710         struct mm_struct *mm = vma->vm_mm;
1711         struct vm_area_struct *new_vma;
1712         bool faulted_in_anon_vma = true;
1713         VMA_ITERATOR(vmi, mm, addr);
1714         VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
1715
1716         /*
1717          * If anonymous vma has not yet been faulted, update new pgoff
1718          * to match new location, to increase its chance of merging.
1719          */
1720         if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1721                 pgoff = addr >> PAGE_SHIFT;
1722                 faulted_in_anon_vma = false;
1723         }
1724
1725         new_vma = find_vma_prev(mm, addr, &vmg.prev);
1726         if (new_vma && new_vma->vm_start < addr + len)
1727                 return NULL;    /* should never get here */
1728
1729         vmg.vma = NULL; /* New VMA range. */
1730         vmg.pgoff = pgoff;
1731         vmg.next = vma_iter_next_rewind(&vmi, NULL);
1732         new_vma = vma_merge_new_range(&vmg);
1733
1734         if (new_vma) {
1735                 /*
1736                  * Source vma may have been merged into new_vma
1737                  */
1738                 if (unlikely(vma_start >= new_vma->vm_start &&
1739                              vma_start < new_vma->vm_end)) {
1740                         /*
1741                          * The only way we can get a vma_merge with
1742                          * self during an mremap is if the vma hasn't
1743                          * been faulted in yet and we were allowed to
1744                          * reset the dst vma->vm_pgoff to the
1745                          * destination address of the mremap to allow
1746                          * the merge to happen. mremap must change the
1747                          * vm_pgoff linearity between src and dst vmas
1748                          * (in turn preventing a vma_merge) to be
1749                          * safe. It is only safe to keep the vm_pgoff
1750                          * linear if there are no pages mapped yet.
1751                          */
1752                         VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1753                         *vmap = vma = new_vma;
1754                 }
1755                 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1756         } else {
1757                 new_vma = vm_area_dup(vma);
1758                 if (!new_vma)
1759                         goto out;
1760                 vma_set_range(new_vma, addr, addr + len, pgoff);
1761                 if (vma_dup_policy(vma, new_vma))
1762                         goto out_free_vma;
1763                 if (anon_vma_clone(new_vma, vma))
1764                         goto out_free_mempol;
1765                 if (new_vma->vm_file)
1766                         get_file(new_vma->vm_file);
1767                 if (new_vma->vm_ops && new_vma->vm_ops->open)
1768                         new_vma->vm_ops->open(new_vma);
1769                 if (vma_link(mm, new_vma))
1770                         goto out_vma_link;
1771                 *need_rmap_locks = false;
1772         }
1773         return new_vma;
1774
1775 out_vma_link:
1776         vma_close(new_vma);
1777
1778         if (new_vma->vm_file)
1779                 fput(new_vma->vm_file);
1780
1781         unlink_anon_vmas(new_vma);
1782 out_free_mempol:
1783         mpol_put(vma_policy(new_vma));
1784 out_free_vma:
1785         vm_area_free(new_vma);
1786 out:
1787         return NULL;
1788 }
1789
1790 /*
1791  * Rough compatibility check to quickly see if it's even worth looking
1792  * at sharing an anon_vma.
1793  *
1794  * They need to have the same vm_file, and the flags can only differ
1795  * in things that mprotect may change.
1796  *
1797  * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1798  * we can merge the two vma's. For example, we refuse to merge a vma if
1799  * there is a vm_ops->close() function, because that indicates that the
1800  * driver is doing some kind of reference counting. But that doesn't
1801  * really matter for the anon_vma sharing case.
1802  */
1803 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1804 {
1805         return a->vm_end == b->vm_start &&
1806                 mpol_equal(vma_policy(a), vma_policy(b)) &&
1807                 a->vm_file == b->vm_file &&
1808                 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1809                 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1810 }
1811
1812 /*
1813  * Do some basic sanity checking to see if we can re-use the anon_vma
1814  * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1815  * the same as 'old', the other will be the new one that is trying
1816  * to share the anon_vma.
1817  *
1818  * NOTE! This runs with mmap_lock held for reading, so it is possible that
1819  * the anon_vma of 'old' is concurrently in the process of being set up
1820  * by another page fault trying to merge _that_. But that's ok: if it
1821  * is being set up, that automatically means that it will be a singleton
1822  * acceptable for merging, so we can do all of this optimistically. But
1823  * we do that READ_ONCE() to make sure that we never re-load the pointer.
1824  *
1825  * IOW: the "list_is_singular()" test on the anon_vma_chain only
1826  * matters for the 'stable anon_vma' case (i.e. the thing we want to avoid
1827  * is returning an anon_vma that is "complex" due to having gone through
1828  * a fork).
1829  *
1830  * We also make sure that the two vma's are compatible (adjacent,
1831  * and with the same memory policies). That's all stable, even with just
1832  * a read lock on the mmap_lock.
1833  */
1834 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
1835                                           struct vm_area_struct *a,
1836                                           struct vm_area_struct *b)
1837 {
1838         if (anon_vma_compatible(a, b)) {
1839                 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1840
1841                 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1842                         return anon_vma;
1843         }
1844         return NULL;
1845 }
1846
1847 /*
1848  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1849  * neighbouring vmas for a suitable anon_vma, before it goes off
1850  * to allocate a new anon_vma.  It checks because a repetitive
1851  * sequence of mprotects and faults may otherwise lead to distinct
1852  * anon_vmas being allocated, preventing vma merge in subsequent
1853  * mprotect.
1854  */
1855 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1856 {
1857         struct anon_vma *anon_vma = NULL;
1858         struct vm_area_struct *prev, *next;
1859         VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
1860
1861         /* Try next first. */
1862         next = vma_iter_load(&vmi);
1863         if (next) {
1864                 anon_vma = reusable_anon_vma(next, vma, next);
1865                 if (anon_vma)
1866                         return anon_vma;
1867         }
1868
1869         prev = vma_prev(&vmi);
1870         VM_BUG_ON_VMA(prev != vma, vma);
1871         prev = vma_prev(&vmi);
1872         /* Then try prev. */
1873         if (prev)
1874                 anon_vma = reusable_anon_vma(prev, prev, vma);
1875
1876         /*
1877          * We might reach here with anon_vma == NULL if we can't find
1878          * any reusable anon_vma.
1879          * There's no absolute need to look only at touching neighbours:
1880          * we could search further afield for "compatible" anon_vmas.
1881          * But it would probably just be a waste of time searching,
1882          * or lead to too many vmas hanging off the same anon_vma.
1883          * We're trying to allow mprotect remerging later on,
1884          * not trying to minimize memory used for anon_vmas.
1885          */
1886         return anon_vma;
1887 }
1888
1889 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1890 {
1891         return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1892 }
1893
1894 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1895 {
1896         return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1897                 (VM_WRITE | VM_SHARED);
1898 }
1899
1900 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1901 {
1902         /* No managed pages to writeback. */
1903         if (vma->vm_flags & VM_PFNMAP)
1904                 return false;
1905
1906         return vma->vm_file && vma->vm_file->f_mapping &&
1907                 mapping_can_writeback(vma->vm_file->f_mapping);
1908 }
1909
1910 /*
1911  * Does this VMA require the underlying folios to have their dirty state
1912  * tracked?
1913  */
1914 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1915 {
1916         /* Only shared, writable VMAs require dirty tracking. */
1917         if (!vma_is_shared_writable(vma))
1918                 return false;
1919
1920         /* Does the filesystem need to be notified? */
1921         if (vm_ops_needs_writenotify(vma->vm_ops))
1922                 return true;
1923
1924         /*
1925          * Even if the filesystem doesn't indicate a need for writenotify, if it
1926          * can writeback, dirty tracking is still required.
1927          */
1928         return vma_fs_can_writeback(vma);
1929 }
1930
1931 /*
1932  * Some shared mappings will want the pages marked read-only
1933  * to track write events. If so, we'll downgrade vm_page_prot
1934  * to the private version (using protection_map[] without the
1935  * VM_SHARED bit).
1936  */
1937 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1938 {
1939         /* If it was private or non-writable, the write bit is already clear */
1940         if (!vma_is_shared_writable(vma))
1941                 return false;
1942
1943         /* The backer wishes to know when pages are first written to? */
1944         if (vm_ops_needs_writenotify(vma->vm_ops))
1945                 return true;
1946
1947         /* The open routine did something to the protections that pgprot_modify
1948          * won't preserve? */
1949         if (pgprot_val(vm_page_prot) !=
1950             pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1951                 return false;
1952
1953         /*
1954          * Do we need to track softdirty? hugetlb does not support softdirty
1955          * tracking yet.
1956          */
1957         if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1958                 return true;
1959
1960         /* Do we need write faults for uffd-wp tracking? */
1961         if (userfaultfd_wp(vma))
1962                 return true;
1963
1964         /* Can the mapping track the dirty pages? */
1965         return vma_fs_can_writeback(vma);
1966 }
1967
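/*
 * Illustrative sketch (not taken from this file): vma_set_page_prot()
 * (defined elsewhere in mm/) is the main consumer of the helper above; when
 * writenotify is wanted it derives vm_page_prot as if the mapping were
 * private, so the first write to each page faults and can be tracked.
 * Roughly:
 *
 *	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vma->vm_flags);
 *	if (vma_wants_writenotify(vma, vm_page_prot))
 *		vm_page_prot = vm_pgprot_modify(vm_page_prot,
 *						vma->vm_flags & ~VM_SHARED);
 *	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
 */
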
1968 static DEFINE_MUTEX(mm_all_locks_mutex);
1969
1970 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
1971 {
1972         if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
1973                 /*
1974                  * The LSB of head.next can't change from under us
1975                  * because we hold the mm_all_locks_mutex.
1976                  */
1977                 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
1978                 /*
1979                  * We can safely modify head.next after taking the
1980                  * anon_vma->root->rwsem. If some other vma in this mm shares
1981                  * the same anon_vma we won't take it again.
1982                  *
1983                  * No need of atomic instructions here, head.next
1984                  * can't change from under us thanks to the
1985                  * anon_vma->root->rwsem.
1986                  */
1987                 if (__test_and_set_bit(0, (unsigned long *)
1988                                        &anon_vma->root->rb_root.rb_root.rb_node))
1989                         BUG();
1990         }
1991 }
1992
1993 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
1994 {
1995         if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
1996                 /*
1997                  * AS_MM_ALL_LOCKS can't change from under us because
1998                  * we hold the mm_all_locks_mutex.
1999                  *
2000                  * Operations on ->flags have to be atomic because
2001                  * even if AS_MM_ALL_LOCKS is stable thanks to the
2002                  * mm_all_locks_mutex, there may be other cpus
2003                  * changing other bitflags in parallel to us.
2004                  */
2005                 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2006                         BUG();
2007                 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
2008         }
2009 }
2010
2011 /*
2012  * This operation locks against the VM for all pte/vma/mm related
2013  * operations that could ever happen on a certain mm. This includes
2014  * vmtruncate, try_to_unmap, and all page faults.
2015  *
2016  * The caller must take the mmap_lock in write mode before calling
2017  * mm_take_all_locks(). The caller isn't allowed to release the
2018  * mmap_lock until mm_drop_all_locks() returns.
2019  *
2020  * mmap_lock in write mode is required in order to block all operations
2021  * that could modify pagetables and free pages without need of
2022  * altering the vma layout. It's also needed in write mode to avoid new
2023  * anon_vmas being associated with existing vmas.
2024  *
2025  * A single task can't call mm_take_all_locks() more than once in a row
2026  * or it would deadlock.
2027  *
2028  * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
2029  * mapping->flags avoid taking the same lock twice if more than one
2030  * vma in this mm is backed by the same anon_vma or address_space.
2031  *
2032  * We take locks in the following order, according to the comment at the
2033  * beginning of mm/rmap.c:
2034  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
2035  *     hugetlb mapping);
2036  *   - all vmas marked locked;
2037  *   - all i_mmap_rwsem locks;
2038  *   - all anon_vma->rwsem locks.
2039  *
2040  * We can take all locks within these types randomly because the VM code
2041  * doesn't nest them and we are protected from parallel mm_take_all_locks()
2042  * by mm_all_locks_mutex.
2043  *
2044  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2045  * that may have to take thousands of locks.
2046  *
2047  * mm_take_all_locks() can fail if it's interrupted by signals.
2048  */
2049 int mm_take_all_locks(struct mm_struct *mm)
2050 {
2051         struct vm_area_struct *vma;
2052         struct anon_vma_chain *avc;
2053         VMA_ITERATOR(vmi, mm, 0);
2054
2055         mmap_assert_write_locked(mm);
2056
2057         mutex_lock(&mm_all_locks_mutex);
2058
2059         /*
2060          * vma_start_write() does not have a complement in mm_drop_all_locks()
2061          * because vma_start_write() is always asymmetrical; it marks a VMA as
2062          * being written to until mmap_write_unlock() or mmap_write_downgrade()
2063          * is reached.
2064          */
2065         for_each_vma(vmi, vma) {
2066                 if (signal_pending(current))
2067                         goto out_unlock;
2068                 vma_start_write(vma);
2069         }
2070
2071         vma_iter_init(&vmi, mm, 0);
2072         for_each_vma(vmi, vma) {
2073                 if (signal_pending(current))
2074                         goto out_unlock;
2075                 if (vma->vm_file && vma->vm_file->f_mapping &&
2076                                 is_vm_hugetlb_page(vma))
2077                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
2078         }
2079
2080         vma_iter_init(&vmi, mm, 0);
2081         for_each_vma(vmi, vma) {
2082                 if (signal_pending(current))
2083                         goto out_unlock;
2084                 if (vma->vm_file && vma->vm_file->f_mapping &&
2085                                 !is_vm_hugetlb_page(vma))
2086                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
2087         }
2088
2089         vma_iter_init(&vmi, mm, 0);
2090         for_each_vma(vmi, vma) {
2091                 if (signal_pending(current))
2092                         goto out_unlock;
2093                 if (vma->anon_vma)
2094                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2095                                 vm_lock_anon_vma(mm, avc->anon_vma);
2096         }
2097
2098         return 0;
2099
2100 out_unlock:
2101         mm_drop_all_locks(mm);
2102         return -EINTR;
2103 }
2104
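/*
 * Illustrative sketch (not taken from this file): mm_take_all_locks() and
 * mm_drop_all_locks() are used in pairs under mmap_lock held for write,
 * e.g. by mmu notifier registration, roughly as follows:
 *
 *	mmap_write_lock(mm);
 *	ret = mm_take_all_locks(mm);
 *	if (ret)
 *		goto out;	// -EINTR: a signal was pending
 *	// ... publish state that page faults must not race with ...
 *	mm_drop_all_locks(mm);
 * out:
 *	mmap_write_unlock(mm);
 */
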
2105 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2106 {
2107         if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2108                 /*
2109                  * The LSB of head.next can't change to 0 from under
2110                  * us because we hold the mm_all_locks_mutex.
2111                  *
2112                  * We must however clear the bitflag before unlocking
2113                  * the vma so the users using the anon_vma->rb_root will
2114                  * never see our bitflag.
2115                  *
2116                  * No need of atomic instructions here, head.next
2117                  * can't change from under us until we release the
2118                  * anon_vma->root->rwsem.
2119                  */
2120                 if (!__test_and_clear_bit(0, (unsigned long *)
2121                                           &anon_vma->root->rb_root.rb_root.rb_node))
2122                         BUG();
2123                 anon_vma_unlock_write(anon_vma);
2124         }
2125 }
2126
2127 static void vm_unlock_mapping(struct address_space *mapping)
2128 {
2129         if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2130                 /*
2131                  * AS_MM_ALL_LOCKS can't change to 0 from under us
2132                  * because we hold the mm_all_locks_mutex.
2133                  */
2134                 i_mmap_unlock_write(mapping);
2135                 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2136                                         &mapping->flags))
2137                         BUG();
2138         }
2139 }
2140
2141 /*
2142  * The mmap_lock cannot be released by the caller until
2143  * mm_drop_all_locks() returns.
2144  */
2145 void mm_drop_all_locks(struct mm_struct *mm)
2146 {
2147         struct vm_area_struct *vma;
2148         struct anon_vma_chain *avc;
2149         VMA_ITERATOR(vmi, mm, 0);
2150
2151         mmap_assert_write_locked(mm);
2152         BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2153
2154         for_each_vma(vmi, vma) {
2155                 if (vma->anon_vma)
2156                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2157                                 vm_unlock_anon_vma(avc->anon_vma);
2158                 if (vma->vm_file && vma->vm_file->f_mapping)
2159                         vm_unlock_mapping(vma->vm_file->f_mapping);
2160         }
2161
2162         mutex_unlock(&mm_all_locks_mutex);
2163 }
2164
2165 /*
2166  * We account for memory if it's a private writable mapping,
2167  * it is not backed by hugepages and VM_NORESERVE wasn't set.
2168  */
2169 static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
2170 {
2171         /*
2172          * hugetlb has its own accounting separate from the core VM
2173          * VM_HUGETLB may not be set yet so we cannot check for that flag.
2174          */
2175         if (file && is_file_hugepages(file))
2176                 return false;
2177
2178         return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
2179 }
2180
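/*
 * Illustrative examples (not taken from this file) of the rule above, where
 * hugetlb_file stands for any hugetlbfs-backed struct file:
 *
 *	accountable_mapping(NULL, VM_READ | VM_WRITE)			-> true
 *	accountable_mapping(NULL, VM_READ | VM_WRITE | VM_SHARED)	-> false
 *	accountable_mapping(NULL, VM_READ | VM_WRITE | VM_NORESERVE)	-> false
 *	accountable_mapping(hugetlb_file, VM_READ | VM_WRITE)		-> false
 */
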
2181 /*
2182  * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
2183  * operation.
2184  * @vms: The vma unmap structure
2185  * @mas_detach: The maple state with the detached maple tree
2186  *
2187  * Reattach any detached vmas, free up the maple tree used to track the vmas.
2188  * If that's not possible because the ptes are cleared (and vm_ops->close() may
2189  * have been called), then a NULL is written over the vmas and the vmas are
2190  * removed (munmap() completed).
2191  */
2192 static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
2193                 struct ma_state *mas_detach)
2194 {
2195         struct ma_state *mas = &vms->vmi->mas;
2196
2197         if (!vms->nr_pages)
2198                 return;
2199
2200         if (vms->clear_ptes)
2201                 return reattach_vmas(mas_detach);
2202
2203         /*
2204          * Aborting cannot just call vm_ops->open() because open/close are often
2205          * not symmetrical and state data has been lost.  Resort to the old
2206          * failure method of leaving a gap where the MAP_FIXED mapping failed.
2207          */
2208         mas_set_range(mas, vms->start, vms->end - 1);
2209         mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
2210         /* Clean up the insertion of the unfortunate gap */
2211         vms_complete_munmap_vmas(vms, mas_detach);
2212 }
2213
2214 /*
2215  * __mmap_prepare() - Prepare to gather any overlapping VMAs that need to be
2216  * unmapped once the map operation is completed, check limits, account mapping
2217  * and clean up any pre-existing VMAs.
2218  *
2219  * @map: Mapping state.
2220  * @uf:  Userfaultfd context list.
2221  *
2222  * Returns: 0 on success, error code otherwise.
2223  */
2224 static int __mmap_prepare(struct mmap_state *map, struct list_head *uf)
2225 {
2226         int error;
2227         struct vma_iterator *vmi = map->vmi;
2228         struct vma_munmap_struct *vms = &map->vms;
2229
2230         /* Find the first overlapping VMA and initialise unmap state. */
2231         vms->vma = vma_find(vmi, map->end);
2232         init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
2233                         /* unlock = */ false);
2234
2235         /* OK, we have overlapping VMAs - prepare to unmap them. */
2236         if (vms->vma) {
2237                 mt_init_flags(&map->mt_detach,
2238                               vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2239                 mt_on_stack(map->mt_detach);
2240                 mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
2241                 /* Prepare to unmap any existing mapping in the area */
2242                 error = vms_gather_munmap_vmas(vms, &map->mas_detach);
2243                 if (error) {
2244                         /* On error VMAs will already have been reattached. */
2245                         vms->nr_pages = 0;
2246                         return error;
2247                 }
2248
2249                 map->next = vms->next;
2250                 map->prev = vms->prev;
2251         } else {
2252                 map->next = vma_iter_next_rewind(vmi, &map->prev);
2253         }
2254
2255         /* Check against address space limit. */
2256         if (!may_expand_vm(map->mm, map->flags, map->pglen - vms->nr_pages))
2257                 return -ENOMEM;
2258
2259         /* Private writable mapping: check memory availability. */
2260         if (accountable_mapping(map->file, map->flags)) {
2261                 map->charged = map->pglen;
2262                 map->charged -= vms->nr_accounted;
2263                 if (map->charged) {
2264                         error = security_vm_enough_memory_mm(map->mm, map->charged);
2265                         if (error)
2266                                 return error;
2267                 }
2268
2269                 vms->nr_accounted = 0;
2270                 map->flags |= VM_ACCOUNT;
2271         }
2272
2273         /*
2274          * Clear PTEs while the vma is still in the tree so that rmap
2275          * cannot race with the freeing later in the truncate scenario.
2276          * This is also needed for mmap_file(), which is why the
2277          * vm_ops->close() function is called.
2278          */
2279         vms_clean_up_area(vms, &map->mas_detach);
2280
2281         return 0;
2282 }
2283
2284
2285 static int __mmap_new_file_vma(struct mmap_state *map,
2286                                struct vm_area_struct *vma)
2287 {
2288         struct vma_iterator *vmi = map->vmi;
2289         int error;
2290
2291         vma->vm_file = get_file(map->file);
2292         error = mmap_file(vma->vm_file, vma);
2293         if (error) {
2294                 fput(vma->vm_file);
2295                 vma->vm_file = NULL;
2296
2297                 vma_iter_set(vmi, vma->vm_end);
2298                 /* Undo any partial mapping done by a device driver. */
2299                 unmap_region(&vmi->mas, vma, map->prev, map->next);
2300
2301                 return error;
2302         }
2303
2304         /* Drivers cannot alter the address of the VMA. */
2305         WARN_ON_ONCE(map->addr != vma->vm_start);
2306         /*
2307          * Drivers should not permit writability when previously it was
2308          * disallowed.
2309          */
2310         VM_WARN_ON_ONCE(map->flags != vma->vm_flags &&
2311                         !(map->flags & VM_MAYWRITE) &&
2312                         (vma->vm_flags & VM_MAYWRITE));
2313
2314         /* If the flags change (and are mergeable), let's retry later. */
2315         map->retry_merge = vma->vm_flags != map->flags && !(vma->vm_flags & VM_SPECIAL);
2316         map->flags = vma->vm_flags;
2317
2318         return 0;
2319 }
2320
2321 /*
2322  * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not
2323  * possible.
2324  *
2325  * @map:  Mapping state.
2326  * @vmap: Output pointer for the new VMA.
2327  *
2328  * Returns: Zero on success, or an error.
2329  */
2330 static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
2331 {
2332         struct vma_iterator *vmi = map->vmi;
2333         int error = 0;
2334         struct vm_area_struct *vma;
2335
2336         /*
2337          * Determine the object being mapped and call the appropriate
2338          * specific mapper. The address has already been validated but
2339          * not unmapped; however, the maps have been removed from the list.
2340          */
2341         vma = vm_area_alloc(map->mm);
2342         if (!vma)
2343                 return -ENOMEM;
2344
2345         vma_iter_config(vmi, map->addr, map->end);
2346         vma_set_range(vma, map->addr, map->end, map->pgoff);
2347         vm_flags_init(vma, map->flags);
2348         vma->vm_page_prot = vm_get_page_prot(map->flags);
2349
2350         if (vma_iter_prealloc(vmi, vma)) {
2351                 error = -ENOMEM;
2352                 goto free_vma;
2353         }
2354
2355         if (map->file)
2356                 error = __mmap_new_file_vma(map, vma);
2357         else if (map->flags & VM_SHARED)
2358                 error = shmem_zero_setup(vma);
2359         else
2360                 vma_set_anonymous(vma);
2361
2362         if (error)
2363                 goto free_iter_vma;
2364
2365 #ifdef CONFIG_SPARC64
2366         /* TODO: Fix SPARC ADI! */
2367         WARN_ON_ONCE(!arch_validate_flags(map->flags));
2368 #endif
2369
2370         /* Lock the VMA since it is modified after insertion into VMA tree */
2371         vma_start_write(vma);
2372         vma_iter_store(vmi, vma);
2373         map->mm->map_count++;
2374         vma_link_file(vma);
2375
2376         /*
2377          * vma_merge_new_range() calls khugepaged_enter_vma() too; the call
2378          * below covers the non-merge case.
2379          */
2380         khugepaged_enter_vma(vma, map->flags);
2381         ksm_add_vma(vma);
2382         *vmap = vma;
2383         return 0;
2384
2385 free_iter_vma:
2386         vma_iter_free(vmi);
2387 free_vma:
2388         vm_area_free(vma);
2389         return error;
2390 }
2391
2392 /*
2393  * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping
2394  *                     statistics, handle locking and finalise the VMA.
2395  *
2396  * @map: Mapping state.
2397  * @vma: Merged or newly allocated VMA for the mmap()'d region.
2398  */
2399 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
2400 {
2401         struct mm_struct *mm = map->mm;
2402         unsigned long vm_flags = vma->vm_flags;
2403
2404         perf_event_mmap(vma);
2405
2406         /* Unmap any existing mapping in the area. */
2407         vms_complete_munmap_vmas(&map->vms, &map->mas_detach);
2408
2409         vm_stat_account(mm, vma->vm_flags, map->pglen);
2410         if (vm_flags & VM_LOCKED) {
2411                 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2412                                         is_vm_hugetlb_page(vma) ||
2413                                         vma == get_gate_vma(mm))
2414                         vm_flags_clear(vma, VM_LOCKED_MASK);
2415                 else
2416                         mm->locked_vm += map->pglen;
2417         }
2418
2419         if (vma->vm_file)
2420                 uprobe_mmap(vma);
2421
2422         /*
2423          * A new (or expanded) vma always gets soft-dirty status.
2424          * Otherwise the user-space soft-dirty page tracker won't
2425          * be able to distinguish the situation where a vma area is
2426          * unmapped and then a new one is mapped in place (which must
2427          * be treated as a completely new data area).
2428          */
2429         vm_flags_set(vma, VM_SOFTDIRTY);
2430
2431         vma_set_page_prot(vma);
2432 }
2433
2434 static unsigned long __mmap_region(struct file *file, unsigned long addr,
2435                 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2436                 struct list_head *uf)
2437 {
2438         struct mm_struct *mm = current->mm;
2439         struct vm_area_struct *vma = NULL;
2440         int error;
2441         VMA_ITERATOR(vmi, mm, addr);
2442         MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
2443
2444         error = __mmap_prepare(&map, uf);
2445         if (error)
2446                 goto abort_munmap;
2447
2448         /* Attempt to merge with adjacent VMAs... */
2449         if (map.prev || map.next) {
2450                 VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);
2451
2452                 vma = vma_merge_new_range(&vmg);
2453         }
2454
2455         /* ...but if we can't, allocate a new VMA. */
2456         if (!vma) {
2457                 error = __mmap_new_vma(&map, &vma);
2458                 if (error)
2459                         goto unacct_error;
2460         }
2461
2462         /* If flags changed, we might be able to merge, so try again. */
2463         if (map.retry_merge) {
2464                 struct vm_area_struct *merged;
2465                 VMG_MMAP_STATE(vmg, &map, vma);
2466
2467                 vma_iter_config(map.vmi, map.addr, map.end);
2468                 merged = vma_merge_existing_range(&vmg);
2469                 if (merged)
2470                         vma = merged;
2471         }
2472
2473         __mmap_complete(&map, vma);
2474
2475         return addr;
2476
2477         /* Accounting was done by __mmap_prepare(). */
2478 unacct_error:
2479         if (map.charged)
2480                 vm_unacct_memory(map.charged);
2481 abort_munmap:
2482         vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
2483         return error;
2484 }
2485
2486 /**
2487  * mmap_region() - Actually perform the userland mapping of a VMA into
2488  * current->mm with known, aligned and overflow-checked @addr and @len, and
2489  * correctly determined VMA flags @vm_flags and page offset @pgoff.
2490  *
2491  * This is an internal memory management function, and should not be used
2492  * directly.
2493  *
2494  * The caller must write-lock current->mm->mmap_lock.
2495  *
2496  * @file: If a file-backed mapping, a pointer to the struct file describing the
2497  * file to be mapped, otherwise NULL.
2498  * @addr: The page-aligned address at which to perform the mapping.
2499  * @len: The page-aligned, non-zero, length of the mapping.
2500  * @vm_flags: The VMA flags which should be applied to the mapping.
2501  * @pgoff: If @file is specified, the page offset into the file, if not then
2502  * the virtual page offset in memory of the anonymous mapping.
2503  * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap
2504  * events.
2505  *
2506  * Returns: Either an error, or the address at which the requested mapping has
2507  * been performed.
2508  */
2509 unsigned long mmap_region(struct file *file, unsigned long addr,
2510                           unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2511                           struct list_head *uf)
2512 {
2513         unsigned long ret;
2514         bool writable_file_mapping = false;
2515
2516         mmap_assert_write_locked(current->mm);
2517
2518         /* Check to see if MDWE is applicable. */
2519         if (map_deny_write_exec(vm_flags, vm_flags))
2520                 return -EACCES;
2521
2522         /* Allow architectures to sanity-check the vm_flags. */
2523         if (!arch_validate_flags(vm_flags))
2524                 return -EINVAL;
2525
2526         /* Map writable and ensure this isn't a sealed memfd. */
2527         if (file && is_shared_maywrite(vm_flags)) {
2528                 int error = mapping_map_writable(file->f_mapping);
2529
2530                 if (error)
2531                         return error;
2532                 writable_file_mapping = true;
2533         }
2534
2535         ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
2536
2537         /* Clear our write mapping regardless of error. */
2538         if (writable_file_mapping)
2539                 mapping_unmap_writable(file->f_mapping);
2540
2541         validate_mm(current->mm);
2542         return ret;
2543 }
2544
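/*
 * Illustrative sketch (not taken from this file): do_mmap() is the expected
 * caller of mmap_region().  Once vm_flags and pgoff have been computed, and
 * with mmap_lock already held for write, the hand-off is roughly:
 *
 *	addr = mmap_region(file, addr, len, vm_flags, pgoff, &uf);
 *	if (!IS_ERR_VALUE(addr) && (vm_flags & VM_LOCKED))
 *		*populate = len;	// prefault mlock()ed areas (illustrative)
 */
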
2545 /*
2546  * do_brk_flags() - Increase the brk vma if the flags match.
2547  * @vmi: The vma iterator
2548  * @addr: The start address
2549  * @len: The length of the increase
2550  * @vma: The vma
2551  * @flags: The VMA flags
2552  *
2553  * Extend the brk VMA from addr to addr + len.  If the VMA is NULL or the flags
2554  * do not match then create a new anonymous VMA.  Eventually we may be able to
2555  * do some brk-specific accounting here.
2556  */
2557 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
2558                  unsigned long addr, unsigned long len, unsigned long flags)
2559 {
2560         struct mm_struct *mm = current->mm;
2561
2562         /*
2563          * Check against address space limits by the changed size
2564          * Note: This happens *after* clearing old mappings in some code paths.
2565          */
2566         flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2567         if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
2568                 return -ENOMEM;
2569
2570         if (mm->map_count > sysctl_max_map_count)
2571                 return -ENOMEM;
2572
2573         if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2574                 return -ENOMEM;
2575
2576         /*
2577          * Expand the existing vma if possible; note that singular lists do not
2578          * occur after forking, so the expand will only happen on new VMAs.
2579          */
2580         if (vma && vma->vm_end == addr) {
2581                 VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
2582
2583                 vmg.prev = vma;
2584                 /* vmi is positioned at prev, which this mode expects. */
2585                 vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
2586
2587                 if (vma_merge_new_range(&vmg))
2588                         goto out;
2589                 else if (vmg_nomem(&vmg))
2590                         goto unacct_fail;
2591         }
2592
2593         if (vma)
2594                 vma_iter_next_range(vmi);
2595         /* create a vma struct for an anonymous mapping */
2596         vma = vm_area_alloc(mm);
2597         if (!vma)
2598                 goto unacct_fail;
2599
2600         vma_set_anonymous(vma);
2601         vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
2602         vm_flags_init(vma, flags);
2603         vma->vm_page_prot = vm_get_page_prot(flags);
2604         vma_start_write(vma);
2605         if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
2606                 goto mas_store_fail;
2607
2608         mm->map_count++;
2609         validate_mm(mm);
2610         ksm_add_vma(vma);
2611 out:
2612         perf_event_mmap(vma);
2613         mm->total_vm += len >> PAGE_SHIFT;
2614         mm->data_vm += len >> PAGE_SHIFT;
2615         if (flags & VM_LOCKED)
2616                 mm->locked_vm += (len >> PAGE_SHIFT);
2617         vm_flags_set(vma, VM_SOFTDIRTY);
2618         return 0;
2619
2620 mas_store_fail:
2621         vm_area_free(vma);
2622 unacct_fail:
2623         vm_unacct_memory(len >> PAGE_SHIFT);
2624         return -ENOMEM;
2625 }
2626
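/*
 * Illustrative sketch (not taken from this file): the brk(2) path is the
 * expected caller.  After validating the new break it positions the vma
 * iterator at the old break, looks up the existing brk VMA (lookup elided
 * here) and extends it:
 *
 *	vma_iter_init(&vmi, mm, oldbrk);
 *	brkvma = ...;				// existing brk VMA, or NULL
 *	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
 *		goto out;
 *	mm->brk = brk;
 */
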
2627 /**
2628  * unmapped_area() - Find an area between the low_limit and the high_limit with
2629  * the correct alignment and offset, all from @info. Note: current->mm is used
2630  * for the search.
2631  *
2632  * @info: The unmapped area information including the range [low_limit,
2633  * high_limit), the alignment offset and mask.
2634  *
2635  * Return: A memory address or -ENOMEM.
2636  */
2637 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
2638 {
2639         unsigned long length, gap;
2640         unsigned long low_limit, high_limit;
2641         struct vm_area_struct *tmp;
2642         VMA_ITERATOR(vmi, current->mm, 0);
2643
2644         /* Adjust search length to account for worst case alignment overhead */
2645         length = info->length + info->align_mask + info->start_gap;
2646         if (length < info->length)
2647                 return -ENOMEM;
2648
2649         low_limit = info->low_limit;
2650         if (low_limit < mmap_min_addr)
2651                 low_limit = mmap_min_addr;
2652         high_limit = info->high_limit;
2653 retry:
2654         if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
2655                 return -ENOMEM;
2656
2657         /*
2658          * Adjust for the gap first so it doesn't interfere with the
2659          * later alignment. The first step is the minimum needed to
2660          * fulfill the start gap; the next step is the minimum needed to
2661          * align that. Together they are the minimum needed to fulfill both.
2662          */
2663         gap = vma_iter_addr(&vmi) + info->start_gap;
2664         gap += (info->align_offset - gap) & info->align_mask;
2665         tmp = vma_next(&vmi);
2666         if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
2667                 if (vm_start_gap(tmp) < gap + length - 1) {
2668                         low_limit = tmp->vm_end;
2669                         vma_iter_reset(&vmi);
2670                         goto retry;
2671                 }
2672         } else {
2673                 tmp = vma_prev(&vmi);
2674                 if (tmp && vm_end_gap(tmp) > gap) {
2675                         low_limit = vm_end_gap(tmp);
2676                         vma_iter_reset(&vmi);
2677                         goto retry;
2678                 }
2679         }
2680
2681         return gap;
2682 }
2683
2684 /**
2685  * unmapped_area_topdown() - Find an area between the low_limit and the
2686  * high_limit with the correct alignment and offset at the highest available
2687  * address, all from @info. Note: current->mm is used for the search.
2688  *
2689  * @info: The unmapped area information including the range [low_limit,
2690  * high_limit), the alignment offset and mask.
2691  *
2692  * Return: A memory address or -ENOMEM.
2693  */
2694 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
2695 {
2696         unsigned long length, gap, gap_end;
2697         unsigned long low_limit, high_limit;
2698         struct vm_area_struct *tmp;
2699         VMA_ITERATOR(vmi, current->mm, 0);
2700
2701         /* Adjust search length to account for worst case alignment overhead */
2702         length = info->length + info->align_mask + info->start_gap;
2703         if (length < info->length)
2704                 return -ENOMEM;
2705
2706         low_limit = info->low_limit;
2707         if (low_limit < mmap_min_addr)
2708                 low_limit = mmap_min_addr;
2709         high_limit = info->high_limit;
2710 retry:
2711         if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
2712                 return -ENOMEM;
2713
2714         gap = vma_iter_end(&vmi) - info->length;
2715         gap -= (gap - info->align_offset) & info->align_mask;
2716         gap_end = vma_iter_end(&vmi);
2717         tmp = vma_next(&vmi);
2718         if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
2719                 if (vm_start_gap(tmp) < gap_end) {
2720                         high_limit = vm_start_gap(tmp);
2721                         vma_iter_reset(&vmi);
2722                         goto retry;
2723                 }
2724         } else {
2725                 tmp = vma_prev(&vmi);
2726                 if (tmp && vm_end_gap(tmp) > gap) {
2727                         high_limit = tmp->vm_start;
2728                         vma_iter_reset(&vmi);
2729                         goto retry;
2730                 }
2731         }
2732
2733         return gap;
2734 }
2735
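/*
 * Illustrative sketch (not taken from this file): callers do not normally
 * invoke the two searches above directly.  An arch get_unmapped_area()
 * implementation fills a struct vm_unmapped_area_info and lets
 * vm_unmapped_area() dispatch on VM_UNMAPPED_AREA_TOPDOWN, roughly:
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 *	info.length = len;
 *	info.low_limit = PAGE_SIZE;
 *	info.high_limit = mm->mmap_base;
 *	addr = vm_unmapped_area(&info);
 */
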
2736 /*
2737  * Verify that the stack growth is acceptable and
2738  * update accounting. This is shared with both the
2739  * grow-up and grow-down cases.
2740  */
2741 static int acct_stack_growth(struct vm_area_struct *vma,
2742                              unsigned long size, unsigned long grow)
2743 {
2744         struct mm_struct *mm = vma->vm_mm;
2745         unsigned long new_start;
2746
2747         /* address space limit tests */
2748         if (!may_expand_vm(mm, vma->vm_flags, grow))
2749                 return -ENOMEM;
2750
2751         /* Stack limit test */
2752         if (size > rlimit(RLIMIT_STACK))
2753                 return -ENOMEM;
2754
2755         /* mlock limit tests */
2756         if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
2757                 return -ENOMEM;
2758
2759         /* Check to ensure the stack will not grow into a hugetlb-only region */
2760         new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2761                         vma->vm_end - size;
2762         if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2763                 return -EFAULT;
2764
2765         /*
2766          * Overcommit..  This must be the final test, as it will
2767          * update security statistics.
2768          */
2769         if (security_vm_enough_memory_mm(mm, grow))
2770                 return -ENOMEM;
2771
2772         return 0;
2773 }
2774
2775 #if defined(CONFIG_STACK_GROWSUP)
2776 /*
2777  * PA-RISC uses this for its stack.
2778  * vma is the last one with address > vma->vm_end.  Have to extend vma.
2779  */
2780 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2781 {
2782         struct mm_struct *mm = vma->vm_mm;
2783         struct vm_area_struct *next;
2784         unsigned long gap_addr;
2785         int error = 0;
2786         VMA_ITERATOR(vmi, mm, vma->vm_start);
2787
2788         if (!(vma->vm_flags & VM_GROWSUP))
2789                 return -EFAULT;
2790
2791         mmap_assert_write_locked(mm);
2792
2793         /* Guard against exceeding limits of the address space. */
2794         address &= PAGE_MASK;
2795         if (address >= (TASK_SIZE & PAGE_MASK))
2796                 return -ENOMEM;
2797         address += PAGE_SIZE;
2798
2799         /* Enforce stack_guard_gap */
2800         gap_addr = address + stack_guard_gap;
2801
2802         /* Guard against overflow */
2803         if (gap_addr < address || gap_addr > TASK_SIZE)
2804                 gap_addr = TASK_SIZE;
2805
2806         next = find_vma_intersection(mm, vma->vm_end, gap_addr);
2807         if (next && vma_is_accessible(next)) {
2808                 if (!(next->vm_flags & VM_GROWSUP))
2809                         return -ENOMEM;
2810                 /* Check that both stack segments have the same anon_vma? */
2811         }
2812
2813         if (next)
2814                 vma_iter_prev_range_limit(&vmi, address);
2815
2816         vma_iter_config(&vmi, vma->vm_start, address);
2817         if (vma_iter_prealloc(&vmi, vma))
2818                 return -ENOMEM;
2819
2820         /* We must make sure the anon_vma is allocated. */
2821         if (unlikely(anon_vma_prepare(vma))) {
2822                 vma_iter_free(&vmi);
2823                 return -ENOMEM;
2824         }
2825
2826         /* Lock the VMA before expanding to prevent concurrent page faults */
2827         vma_start_write(vma);
2828         /* We update the anon VMA tree. */
2829         anon_vma_lock_write(vma->anon_vma);
2830
2831         /* Somebody else might have raced and expanded it already */
2832         if (address > vma->vm_end) {
2833                 unsigned long size, grow;
2834
2835                 size = address - vma->vm_start;
2836                 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2837
2838                 error = -ENOMEM;
2839                 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2840                         error = acct_stack_growth(vma, size, grow);
2841                         if (!error) {
2842                                 if (vma->vm_flags & VM_LOCKED)
2843                                         mm->locked_vm += grow;
2844                                 vm_stat_account(mm, vma->vm_flags, grow);
2845                                 anon_vma_interval_tree_pre_update_vma(vma);
2846                                 vma->vm_end = address;
2847                                 /* Overwrite old entry in mtree. */
2848                                 vma_iter_store(&vmi, vma);
2849                                 anon_vma_interval_tree_post_update_vma(vma);
2850
2851                                 perf_event_mmap(vma);
2852                         }
2853                 }
2854         }
2855         anon_vma_unlock_write(vma->anon_vma);
2856         vma_iter_free(&vmi);
2857         validate_mm(mm);
2858         return error;
2859 }
2860 #endif /* CONFIG_STACK_GROWSUP */
2861
2862 /*
2863  * vma is the first one with address < vma->vm_start.  Have to extend vma.
2864  * mmap_lock held for writing.
2865  */
2866 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
2867 {
2868         struct mm_struct *mm = vma->vm_mm;
2869         struct vm_area_struct *prev;
2870         int error = 0;
2871         VMA_ITERATOR(vmi, mm, vma->vm_start);
2872
2873         if (!(vma->vm_flags & VM_GROWSDOWN))
2874                 return -EFAULT;
2875
2876         mmap_assert_write_locked(mm);
2877
2878         address &= PAGE_MASK;
2879         if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
2880                 return -EPERM;
2881
2882         /* Enforce stack_guard_gap */
2883         prev = vma_prev(&vmi);
2884         /* Check that both stack segments have the same anon_vma? */
2885         if (prev) {
2886                 if (!(prev->vm_flags & VM_GROWSDOWN) &&
2887                     vma_is_accessible(prev) &&
2888                     (address - prev->vm_end < stack_guard_gap))
2889                         return -ENOMEM;
2890         }
2891
2892         if (prev)
2893                 vma_iter_next_range_limit(&vmi, vma->vm_start);
2894
2895         vma_iter_config(&vmi, address, vma->vm_end);
2896         if (vma_iter_prealloc(&vmi, vma))
2897                 return -ENOMEM;
2898
2899         /* We must make sure the anon_vma is allocated. */
2900         if (unlikely(anon_vma_prepare(vma))) {
2901                 vma_iter_free(&vmi);
2902                 return -ENOMEM;
2903         }
2904
2905         /* Lock the VMA before expanding to prevent concurrent page faults */
2906         vma_start_write(vma);
2907         /* We update the anon VMA tree. */
2908         anon_vma_lock_write(vma->anon_vma);
2909
2910         /* Somebody else might have raced and expanded it already */
2911         if (address < vma->vm_start) {
2912                 unsigned long size, grow;
2913
2914                 size = vma->vm_end - address;
2915                 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2916
2917                 error = -ENOMEM;
2918                 if (grow <= vma->vm_pgoff) {
2919                         error = acct_stack_growth(vma, size, grow);
2920                         if (!error) {
2921                                 if (vma->vm_flags & VM_LOCKED)
2922                                         mm->locked_vm += grow;
2923                                 vm_stat_account(mm, vma->vm_flags, grow);
2924                                 anon_vma_interval_tree_pre_update_vma(vma);
2925                                 vma->vm_start = address;
2926                                 vma->vm_pgoff -= grow;
2927                                 /* Overwrite old entry in mtree. */
2928                                 vma_iter_store(&vmi, vma);
2929                                 anon_vma_interval_tree_post_update_vma(vma);
2930
2931                                 perf_event_mmap(vma);
2932                         }
2933                 }
2934         }
2935         anon_vma_unlock_write(vma->anon_vma);
2936         vma_iter_free(&vmi);
2937         validate_mm(mm);
2938         return error;
2939 }
2940
2941 int __vm_munmap(unsigned long start, size_t len, bool unlock)
2942 {
2943         int ret;
2944         struct mm_struct *mm = current->mm;
2945         LIST_HEAD(uf);
2946         VMA_ITERATOR(vmi, mm, start);
2947
2948         if (mmap_write_lock_killable(mm))
2949                 return -EINTR;
2950
2951         ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
2952         if (ret || !unlock)
2953                 mmap_write_unlock(mm);
2954
2955         userfaultfd_unmap_complete(mm, &uf);
2956         return ret;
2957 }
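/*
 * Illustrative sketch (not taken from this file): the munmap(2) syscall is a
 * thin wrapper over the helper above, roughly:
 *
 *	SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 *	{
 *		addr = untagged_addr(addr);
 *		return __vm_munmap(addr, len, true);
 *	}
 */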