1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * VMA-specific functions.
7 #include "vma_internal.h"
11 * If the vma has a ->close operation then the driver probably needs to release
12 * per-vma resources, so we don't attempt to merge those if the caller indicates
13 * the current vma may be removed as part of the merge.
15 static inline bool is_mergeable_vma(struct vm_area_struct *vma,
16 struct file *file, unsigned long vm_flags,
17 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
18 struct anon_vma_name *anon_name, bool may_remove_vma)
 * VM_SOFTDIRTY should not prevent VMA merging: if the flags match except
 * for the dirty bit, the caller should mark the merged VMA as dirty.
 * If the dirty bit were not excluded from the comparison, we would
 * increase pressure on the memory system, forcing the kernel to generate
 * new VMAs when old ones could simply be extended instead.
28 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
30 if (vma->vm_file != file)
32 if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
34 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
36 if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
41 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
42 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
 * The list_is_singular() test is to avoid merging a VMA cloned from its
 * parent. This improves scalability by reducing anon_vma lock contention.
48 if ((!anon_vma1 || !anon_vma2) && (!vma ||
49 list_is_singular(&vma->anon_vma_chain)))
51 return anon_vma1 == anon_vma2;
55 * init_multi_vma_prep() - Initializer for struct vma_prepare
56 * @vp: The vma_prepare struct
57 * @vma: The vma that will be altered once locked
58 * @next: The next vma if it is to be adjusted
59 * @remove: The first vma to be removed
60 * @remove2: The second vma to be removed
62 static void init_multi_vma_prep(struct vma_prepare *vp,
63 struct vm_area_struct *vma,
64 struct vm_area_struct *next,
65 struct vm_area_struct *remove,
66 struct vm_area_struct *remove2)
68 memset(vp, 0, sizeof(struct vma_prepare));
70 vp->anon_vma = vma->anon_vma;
72 vp->remove2 = remove2;
74 if (!vp->anon_vma && next)
75 vp->anon_vma = next->anon_vma;
77 vp->file = vma->vm_file;
79 vp->mapping = vma->vm_file->f_mapping;
84 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
85 * in front of (at a lower virtual address and file offset than) the vma.
87 * We cannot merge two vmas if they have differently assigned (non-NULL)
88 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
90 * We don't check here for the merged mmap wrapping around the end of pagecache
91 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
92 * wrap, nor mmaps which cover the final page at index -1UL.
94 * We assume the vma may be removed as part of the merge.
97 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
98 struct anon_vma *anon_vma, struct file *file,
99 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
100 struct anon_vma_name *anon_name)
102 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
103 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
104 if (vma->vm_pgoff == vm_pgoff)
111 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
112 * beyond (at a higher virtual address and file offset than) the vma.
114 * We cannot merge two vmas if they have differently assigned (non-NULL)
115 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
117 * We assume that vma is not removed as part of the merge.
120 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
121 struct anon_vma *anon_vma, struct file *file,
122 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
123 struct anon_vma_name *anon_name)
125 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
126 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
129 vm_pglen = vma_pages(vma);
130 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
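/*
 * Illustrative sketch (not part of the original file): a caller about to map
 * [addr, end) with the given properties would combine the two predicates
 * above roughly as follows. 'prev' and 'next' name the VMAs adjacent to the
 * new range and are hypothetical here:
 *
 *	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 *
 *	if (prev && addr == prev->vm_end &&
 *	    can_vma_merge_after(prev, vm_flags, anon_vma, file,
 *				pgoff, vm_userfaultfd_ctx, anon_name))
 *		... extend prev forward over [addr, end) ...
 *
 *	if (next && end == next->vm_start &&
 *	    can_vma_merge_before(next, vm_flags, anon_vma, file,
 *				 pgoff + pglen, vm_userfaultfd_ctx, anon_name))
 *		... extend next backward over [addr, end) ...
 *
 * This mirrors the checks vma_merge() performs further down in this file.
 */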
137 * Close a vm structure and free it.
139 void remove_vma(struct vm_area_struct *vma, bool unreachable)
142 if (vma->vm_ops && vma->vm_ops->close)
143 vma->vm_ops->close(vma);
146 mpol_put(vma_policy(vma));
154 * Get rid of page table information in the indicated region.
156 * Called with the mm semaphore held.
158 void unmap_region(struct mm_struct *mm, struct ma_state *mas,
159 struct vm_area_struct *vma, struct vm_area_struct *prev,
160 struct vm_area_struct *next, unsigned long start,
161 unsigned long end, unsigned long tree_end, bool mm_wr_locked)
163 struct mmu_gather tlb;
164 unsigned long mt_start = mas->index;
167 tlb_gather_mmu(&tlb, mm);
168 update_hiwater_rss(mm);
169 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
170 mas_set(mas, mt_start);
171 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
172 next ? next->vm_start : USER_PGTABLES_CEILING,
174 tlb_finish_mmu(&tlb);
178 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
179 * has already been checked or doesn't make sense to fail.
180 * VMA Iterator will point to the end VMA.
182 static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
183 unsigned long addr, int new_below)
185 struct vma_prepare vp;
186 struct vm_area_struct *new;
189 WARN_ON(vma->vm_start >= addr);
190 WARN_ON(vma->vm_end <= addr);
192 if (vma->vm_ops && vma->vm_ops->may_split) {
193 err = vma->vm_ops->may_split(vma, addr);
198 new = vm_area_dup(vma);
205 new->vm_start = addr;
206 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
210 vma_iter_config(vmi, new->vm_start, new->vm_end);
211 if (vma_iter_prealloc(vmi, new))
214 err = vma_dup_policy(vma, new);
218 err = anon_vma_clone(new, vma);
223 get_file(new->vm_file);
225 if (new->vm_ops && new->vm_ops->open)
226 new->vm_ops->open(new);
228 vma_start_write(vma);
229 vma_start_write(new);
231 init_vma_prep(&vp, vma);
234 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
237 vma->vm_start = addr;
238 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
243 /* vma_complete stores the new vma */
244 vma_complete(&vp, vmi, vma->vm_mm);
252 mpol_put(vma_policy(new));
 * Split a vma into two pieces at address 'addr'; a new vma is allocated
 * either for the first part or the tail.
264 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
265 unsigned long addr, int new_below)
267 if (vma->vm_mm->map_count >= sysctl_max_map_count)
270 return __split_vma(vmi, vma, addr, new_below);
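/*
 * Illustrative sketch (not part of the original file): isolating a sub-range
 * [start, end) of an existing VMA before changing its properties, as
 * vma_modify() below does when no merge is possible. new_below selects
 * whether the newly allocated VMA covers the lower or the upper part:
 *
 *	if (vma->vm_start < start)
 *		error = split_vma(vmi, vma, start, 1);
 *	if (!error && vma->vm_end > end)
 *		error = split_vma(vmi, vma, end, 0);
 */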
274 * Ok - we have the memory areas we should free on a maple tree so release them,
275 * and do the vma updates.
277 * Called with the mm semaphore held.
279 static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
281 unsigned long nr_accounted = 0;
282 struct vm_area_struct *vma;
284 /* Update high watermark before we lower total_vm */
285 update_hiwater_vm(mm);
286 mas_for_each(mas, vma, ULONG_MAX) {
287 long nrpages = vma_pages(vma);
289 if (vma->vm_flags & VM_ACCOUNT)
290 nr_accounted += nrpages;
291 vm_stat_account(mm, vma->vm_flags, -nrpages);
292 remove_vma(vma, false);
294 vm_unacct_memory(nr_accounted);
298 * init_vma_prep() - Initializer wrapper for vma_prepare struct
299 * @vp: The vma_prepare struct
300 * @vma: The vma that will be altered once locked
302 void init_vma_prep(struct vma_prepare *vp,
303 struct vm_area_struct *vma)
305 init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
309 * Requires inode->i_mapping->i_mmap_rwsem
311 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
312 struct address_space *mapping)
314 if (vma_is_shared_maywrite(vma))
315 mapping_unmap_writable(mapping);
317 flush_dcache_mmap_lock(mapping);
318 vma_interval_tree_remove(vma, &mapping->i_mmap);
319 flush_dcache_mmap_unlock(mapping);
323 * vma has some anon_vma assigned, and is already inserted on that
324 * anon_vma's interval trees.
326 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
327 * vma must be removed from the anon_vma's interval trees using
328 * anon_vma_interval_tree_pre_update_vma().
330 * After the update, the vma will be reinserted using
331 * anon_vma_interval_tree_post_update_vma().
333 * The entire update must be protected by exclusive mmap_lock and by
334 * the root anon_vma's mutex.
337 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
339 struct anon_vma_chain *avc;
341 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
342 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
346 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
348 struct anon_vma_chain *avc;
350 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
351 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
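/*
 * Illustrative sketch (not part of the original file): the bracketing pattern
 * described above, as used by vma_prepare()/vma_complete() whenever vm_start,
 * vm_end or vm_pgoff of an already linked VMA change. 'new_start' is a
 * hypothetical value:
 *
 *	anon_vma_lock_write(vma->anon_vma);
 *	anon_vma_interval_tree_pre_update_vma(vma);
 *	vma->vm_start = new_start;
 *	anon_vma_interval_tree_post_update_vma(vma);
 *	anon_vma_unlock_write(vma->anon_vma);
 */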
354 static void __vma_link_file(struct vm_area_struct *vma,
355 struct address_space *mapping)
357 if (vma_is_shared_maywrite(vma))
358 mapping_allow_writable(mapping);
360 flush_dcache_mmap_lock(mapping);
361 vma_interval_tree_insert(vma, &mapping->i_mmap);
362 flush_dcache_mmap_unlock(mapping);
 * vma_prepare() - Helper function for handling locking VMAs prior to altering them
367 * @vp: The initialized vma_prepare struct
369 void vma_prepare(struct vma_prepare *vp)
372 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
375 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
376 vp->adj_next->vm_end);
378 i_mmap_lock_write(vp->mapping);
379 if (vp->insert && vp->insert->vm_file) {
381 * Put into interval tree now, so instantiated pages
382 * are visible to arm/parisc __flush_dcache_page
383 * throughout; but we cannot insert into address
384 * space until vma start or end is updated.
386 __vma_link_file(vp->insert,
387 vp->insert->vm_file->f_mapping);
392 anon_vma_lock_write(vp->anon_vma);
393 anon_vma_interval_tree_pre_update_vma(vp->vma);
395 anon_vma_interval_tree_pre_update_vma(vp->adj_next);
399 flush_dcache_mmap_lock(vp->mapping);
400 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
402 vma_interval_tree_remove(vp->adj_next,
403 &vp->mapping->i_mmap);
409 * dup_anon_vma() - Helper function to duplicate anon_vma
410 * @dst: The destination VMA
411 * @src: The source VMA
412 * @dup: Pointer to the destination VMA when successful.
414 * Returns: 0 on success.
416 static int dup_anon_vma(struct vm_area_struct *dst,
417 struct vm_area_struct *src, struct vm_area_struct **dup)
420 * Easily overlooked: when mprotect shifts the boundary, make sure the
421 * expanding vma has anon_vma set if the shrinking vma had, to cover any
422 * anon pages imported.
424 if (src->anon_vma && !dst->anon_vma) {
427 vma_assert_write_locked(dst);
428 dst->anon_vma = src->anon_vma;
429 ret = anon_vma_clone(dst, src);
439 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
440 void validate_mm(struct mm_struct *mm)
444 struct vm_area_struct *vma;
445 VMA_ITERATOR(vmi, mm, 0);
447 mt_validate(&mm->mm_mt);
448 for_each_vma(vmi, vma) {
449 #ifdef CONFIG_DEBUG_VM_RB
450 struct anon_vma *anon_vma = vma->anon_vma;
451 struct anon_vma_chain *avc;
453 unsigned long vmi_start, vmi_end;
456 vmi_start = vma_iter_addr(&vmi);
457 vmi_end = vma_iter_end(&vmi);
458 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
461 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
465 pr_emerg("issue in %s\n", current->comm);
468 pr_emerg("tree range: %px start %lx end %lx\n", vma,
469 vmi_start, vmi_end - 1);
470 vma_iter_dump_tree(&vmi);
473 #ifdef CONFIG_DEBUG_VM_RB
475 anon_vma_lock_read(anon_vma);
476 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
477 anon_vma_interval_tree_verify(avc);
478 anon_vma_unlock_read(anon_vma);
483 if (i != mm->map_count) {
484 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
487 VM_BUG_ON_MM(bug, mm);
489 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
492 * vma_expand - Expand an existing VMA
494 * @vmi: The vma iterator
495 * @vma: The vma to expand
496 * @start: The start of the vma
497 * @end: The exclusive end of the vma
498 * @pgoff: The page offset of vma
 * @next: The VMA following @vma, if any.
501 * Expand @vma to @start and @end. Can expand off the start and end. Will
502 * expand over @next if it's different from @vma and @end == @next->vm_end.
 * Checking if the @vma can expand and merge with @next needs to be handled by
 * the caller.
506 * Returns: 0 on success
508 int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
509 unsigned long start, unsigned long end, pgoff_t pgoff,
510 struct vm_area_struct *next)
512 struct vm_area_struct *anon_dup = NULL;
513 bool remove_next = false;
514 struct vma_prepare vp;
516 vma_start_write(vma);
517 if (next && (vma != next) && (end == next->vm_end)) {
521 vma_start_write(next);
522 ret = dup_anon_vma(vma, next, &anon_dup);
527 init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
528 /* Not merging but overwriting any part of next is not handled. */
529 VM_WARN_ON(next && !vp.remove &&
530 next != vma && end > next->vm_start);
531 /* Only handles expanding */
532 VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
534 /* Note: vma iterator must be pointing to 'start' */
535 vma_iter_config(vmi, start, end);
536 if (vma_iter_prealloc(vmi, vma))
540 vma_adjust_trans_huge(vma, start, end, 0);
541 vma_set_range(vma, start, end, pgoff);
542 vma_iter_store(vmi, vma);
544 vma_complete(&vp, vmi, vma->vm_mm);
549 unlink_anon_vmas(anon_dup);
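/*
 * Illustrative sketch (not part of the original file): a caller holding a
 * preceding VMA 'prev' that should absorb a newly mapped range up to 'end'
 * (possibly swallowing 'next') could drive vma_expand() roughly as below.
 * The merge-eligibility checks are the caller's responsibility:
 *
 *	vma_iter_set(&vmi, prev->vm_start);
 *	if (vma_expand(&vmi, prev, prev->vm_start, end, prev->vm_pgoff, next))
 *		... fall back to allocating a fresh VMA ...
 */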
554 * vma_shrink() - Reduce an existing VMAs memory area
555 * @vmi: The vma iterator
556 * @vma: The VMA to modify
557 * @start: The new start
560 * Returns: 0 on success, -ENOMEM otherwise
562 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
563 unsigned long start, unsigned long end, pgoff_t pgoff)
565 struct vma_prepare vp;
567 WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
569 if (vma->vm_start < start)
570 vma_iter_config(vmi, vma->vm_start, start);
572 vma_iter_config(vmi, end, vma->vm_end);
574 if (vma_iter_prealloc(vmi, NULL))
577 vma_start_write(vma);
579 init_vma_prep(&vp, vma);
581 vma_adjust_trans_huge(vma, start, end, 0);
584 vma_set_range(vma, start, end, pgoff);
585 vma_complete(&vp, vmi, vma->vm_mm);
 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
591 * or for inserting a VMA.
593 * @vp: The vma_prepare struct
594 * @vmi: The vma iterator
597 void vma_complete(struct vma_prepare *vp,
598 struct vma_iterator *vmi, struct mm_struct *mm)
602 vma_interval_tree_insert(vp->adj_next,
603 &vp->mapping->i_mmap);
604 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
605 flush_dcache_mmap_unlock(vp->mapping);
608 if (vp->remove && vp->file) {
609 __remove_shared_vm_struct(vp->remove, vp->mapping);
611 __remove_shared_vm_struct(vp->remove2, vp->mapping);
612 } else if (vp->insert) {
614 * split_vma has split insert from vma, and needs
615 * us to insert it before dropping the locks
616 * (it may either follow vma or precede it).
618 vma_iter_store(vmi, vp->insert);
623 anon_vma_interval_tree_post_update_vma(vp->vma);
625 anon_vma_interval_tree_post_update_vma(vp->adj_next);
626 anon_vma_unlock_write(vp->anon_vma);
630 i_mmap_unlock_write(vp->mapping);
631 uprobe_mmap(vp->vma);
634 uprobe_mmap(vp->adj_next);
639 vma_mark_detached(vp->remove, true);
641 uprobe_munmap(vp->remove, vp->remove->vm_start,
645 if (vp->remove->anon_vma)
646 anon_vma_merge(vp->vma, vp->remove);
648 mpol_put(vma_policy(vp->remove));
650 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
651 vm_area_free(vp->remove);
654 * In mprotect's case 6 (see comments on vma_merge),
655 * we are removing both mid and next vmas
658 vp->remove = vp->remove2;
663 if (vp->insert && vp->file)
664 uprobe_mmap(vp->insert);
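/*
 * Illustrative sketch (not part of the original file): the overall bracket
 * this file uses when altering VMA boundaries; vma_shrink() above is the
 * simplest real instance of it:
 *
 *	init_vma_prep(&vp, vma);                 (or init_multi_vma_prep())
 *	vma_prepare(&vp);                        (take rmap/i_mmap locks)
 *	vma_adjust_trans_huge(vma, start, end, 0);
 *	vma_set_range(vma, start, end, pgoff);
 *	vma_complete(&vp, vmi, vma->vm_mm);      (reinsert, unlock, clean up)
 */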
669 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
670 * @vmi: The vma iterator
671 * @vma: The starting vm_area_struct
673 * @start: The aligned start address to munmap.
674 * @end: The aligned end address to munmap.
675 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock; unlocking only happens on
 *          success.
679 * Return: 0 on success and drops the lock if so directed, error and leaves the
680 * lock held otherwise.
683 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
684 struct mm_struct *mm, unsigned long start,
685 unsigned long end, struct list_head *uf, bool unlock)
687 struct vm_area_struct *prev, *next = NULL;
688 struct maple_tree mt_detach;
691 unsigned long locked_vm = 0;
692 MA_STATE(mas_detach, &mt_detach, 0, 0);
693 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
694 mt_on_stack(mt_detach);
697 * If we need to split any vma, do it now to save pain later.
699 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
700 * unmapped vm_area_struct will remain in use: so lower split_vma
701 * places tmp vma above, and higher split_vma places tmp vma below.
704 /* Does it split the first one? */
705 if (start > vma->vm_start) {
708 * Make sure that map_count on return from munmap() will
709 * not exceed its limit; but let map_count go just above
710 * its limit temporarily, to help free resources as expected.
712 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
713 goto map_count_exceeded;
715 error = __split_vma(vmi, vma, start, 1);
717 goto start_split_failed;
721 * Detach a range of VMAs from the mm. Using next as a temp variable as
722 * it is always overwritten.
726 /* Does it split the end? */
727 if (next->vm_end > end) {
728 error = __split_vma(vmi, next, end, 0);
730 goto end_split_failed;
732 vma_start_write(next);
733 mas_set(&mas_detach, count);
734 error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
736 goto munmap_gather_failed;
737 vma_mark_detached(next, true);
738 if (next->vm_flags & VM_LOCKED)
739 locked_vm += vma_pages(next);
744 * If userfaultfd_unmap_prep returns an error the vmas
745 * will remain split, but userland will get a
746 * highly unexpected error anyway. This is no
747 * different than the case where the first of the two
 * __split_vma fails, but we don't undo the first split, even
 * though we could. This failure is unlikely enough that it's
 * not worth optimizing for.
752 error = userfaultfd_unmap_prep(next, start, end, uf);
755 goto userfaultfd_error;
757 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
758 BUG_ON(next->vm_start < start);
759 BUG_ON(next->vm_start > end);
761 } for_each_vma_range(*vmi, next, end);
763 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
764 /* Make sure no VMAs are about to be lost. */
766 MA_STATE(test, &mt_detach, 0, 0);
767 struct vm_area_struct *vma_mas, *vma_test;
770 vma_iter_set(vmi, start);
772 vma_test = mas_find(&test, count - 1);
773 for_each_vma_range(*vmi, vma_mas, end) {
774 BUG_ON(vma_mas != vma_test);
776 vma_test = mas_next(&test, count - 1);
779 BUG_ON(count != test_count);
783 while (vma_iter_addr(vmi) > start)
784 vma_iter_prev_range(vmi);
786 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
788 goto clear_tree_failed;
790 /* Point of no return */
791 mm->locked_vm -= locked_vm;
792 mm->map_count -= count;
794 mmap_write_downgrade(mm);
796 prev = vma_iter_prev_range(vmi);
797 next = vma_next(vmi);
799 vma_iter_prev_range(vmi);
802 * We can free page tables without write-locking mmap_lock because VMAs
803 * were isolated before we downgraded mmap_lock.
805 mas_set(&mas_detach, 1);
806 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
808 /* Statistics and freeing VMAs */
809 mas_set(&mas_detach, 0);
810 remove_mt(mm, &mas_detach);
813 mmap_read_unlock(mm);
815 __mt_destroy(&mt_detach);
820 munmap_gather_failed:
822 mas_set(&mas_detach, 0);
823 mas_for_each(&mas_detach, next, end)
824 vma_mark_detached(next, false);
826 __mt_destroy(&mt_detach);
834 * do_vmi_munmap() - munmap a given range.
835 * @vmi: The vma iterator
837 * @start: The start address to munmap
838 * @len: The length of the range to munmap
839 * @uf: The userfaultfd list_head
840 * @unlock: set to true if the user wants to drop the mmap_lock on success
 * This function takes a @vmi that is either pointing to the previous VMA or
 * set to MA_START and sets it up to remove the mapping(s). The @len will be
 * aligned and any arch_unmap work will be performed.
846 * Return: 0 on success and drops the lock if so directed, error and leaves the
847 * lock held otherwise.
849 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
850 unsigned long start, size_t len, struct list_head *uf,
854 struct vm_area_struct *vma;
856 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
859 end = start + PAGE_ALIGN(len);
864 * Check if memory is sealed before arch_unmap.
865 * Prevent unmapping a sealed VMA.
866 * can_modify_mm assumes we have acquired the lock on MM.
868 if (unlikely(!can_modify_mm(mm, start, end)))
871 /* arch_unmap() might do unmaps itself. */
872 arch_unmap(mm, start, end);
874 /* Find the first overlapping VMA */
875 vma = vma_find(vmi, end);
878 mmap_write_unlock(mm);
882 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
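/*
 * Illustrative sketch (not part of the original file): a minimal wrapper in
 * the spirit of do_munmap(), showing how a caller that already holds the
 * mmap write lock drives the function above. 'example_munmap' is a
 * hypothetical name:
 *
 *	int example_munmap(struct mm_struct *mm, unsigned long start,
 *			   size_t len, struct list_head *uf)
 *	{
 *		VMA_ITERATOR(vmi, mm, start);
 *
 *		return do_vmi_munmap(&vmi, mm, start, len, uf, false);
 *	}
 *
 * Passing true as the final argument instead asks the helper to downgrade
 * and drop the lock itself on success.
 */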
886 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
887 * figure out whether that can be merged with its predecessor or its
888 * successor. Or both (it neatly fills a hole).
890 * In most cases - when called for mmap, brk or mremap - [addr,end) is
891 * certain not to be mapped by the time vma_merge is called; but when
892 * called for mprotect, it is certain to be already mapped (either at
893 * an offset within prev, or at the start of next), and the flags of
894 * this area are about to be changed to vm_flags - and the no-change
895 * case has already been eliminated.
897 * The following mprotect cases have to be considered, where **** is
898 * the area passed down from mprotect_fixup, never extending beyond one
899 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
900 * at the same address as **** and is of the same or larger span, and
901 * NNNN the next vma after ****:
 *     ****             ****                   ****
 *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPCCCCCC
 *    cannot merge    might become       might become
 *                    PPNNNNNNNNNN       PPPPPPPPPPCC
 *    mmap, brk or    case 4 below       case 5 below
 *    mremap move:
 *                        ****               ****
 *                    PPPP    NNNN       PPPPCCCCNNNN
 *                    might become       might become
 *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
 *                    PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
 *                    PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
916 * It is important for case 8 that the vma CCCC overlapping the
 * region **** is never going to be extended over NNNN. Instead NNNN must
918 * be extended in region **** and CCCC must be removed. This way in
919 * all cases where vma_merge succeeds, the moment vma_merge drops the
920 * rmap_locks, the properties of the merged vma will be already
921 * correct for the whole merged range. Some of those properties like
922 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
923 * be correct for the whole merged range immediately after the
924 * rmap_locks are released. Otherwise if NNNN would be removed and
925 * CCCC would be extended over the NNNN range, remove_migration_ptes
926 * or other rmap walkers (if working on addresses beyond the "end"
927 * parameter) may establish ptes with the wrong permissions of CCCC
928 * instead of the right permissions of NNNN.
931 * PPPP is represented by *prev
932 * CCCC is represented by *curr or not represented at all (NULL)
933 * NNNN is represented by *next or not represented at all (NULL)
934 * **** is not represented - it will be merged and the vma containing the
935 * area is returned, or the function will return NULL
937 static struct vm_area_struct
938 *vma_merge(struct vma_iterator *vmi, struct vm_area_struct *prev,
939 struct vm_area_struct *src, unsigned long addr, unsigned long end,
940 unsigned long vm_flags, pgoff_t pgoff, struct mempolicy *policy,
941 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
942 struct anon_vma_name *anon_name)
944 struct mm_struct *mm = src->vm_mm;
945 struct anon_vma *anon_vma = src->anon_vma;
946 struct file *file = src->vm_file;
947 struct vm_area_struct *curr, *next, *res;
948 struct vm_area_struct *vma, *adjust, *remove, *remove2;
949 struct vm_area_struct *anon_dup = NULL;
950 struct vma_prepare vp;
953 bool merge_prev = false;
954 bool merge_next = false;
955 bool vma_expanded = false;
956 unsigned long vma_start = addr;
957 unsigned long vma_end = end;
958 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
962 * We later require that vma->vm_flags == vm_flags,
963 * so this tests vma->vm_flags & VM_SPECIAL, too.
965 if (vm_flags & VM_SPECIAL)
968 /* Does the input range span an existing VMA? (cases 5 - 8) */
969 curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
971 if (!curr || /* cases 1 - 4 */
972 end == curr->vm_end) /* cases 6 - 8, adjacent VMA */
973 next = vma_lookup(mm, end);
975 next = NULL; /* case 5 */
978 vma_start = prev->vm_start;
979 vma_pgoff = prev->vm_pgoff;
981 /* Can we merge the predecessor? */
982 if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
983 && can_vma_merge_after(prev, vm_flags, anon_vma, file,
984 pgoff, vm_userfaultfd_ctx, anon_name)) {
990 /* Can we merge the successor? */
991 if (next && mpol_equal(policy, vma_policy(next)) &&
992 can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
993 vm_userfaultfd_ctx, anon_name)) {
/* Verify some invariants that must be enforced by the caller. */
998 VM_WARN_ON(prev && addr <= prev->vm_start);
999 VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
1000 VM_WARN_ON(addr >= end);
1002 if (!merge_prev && !merge_next)
1003 return NULL; /* Not mergeable. */
1006 vma_start_write(prev);
1009 remove = remove2 = adjust = NULL;
1011 /* Can we merge both the predecessor and the successor? */
1012 if (merge_prev && merge_next &&
1013 is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
1014 vma_start_write(next);
1015 remove = next; /* case 1 */
1016 vma_end = next->vm_end;
1017 err = dup_anon_vma(prev, next, &anon_dup);
1018 if (curr) { /* case 6 */
1019 vma_start_write(curr);
1023 * Note that the dup_anon_vma below cannot overwrite err
 * since the first caller would do nothing unless next has an anon_vma.
1027 if (!next->anon_vma)
1028 err = dup_anon_vma(prev, curr, &anon_dup);
1030 } else if (merge_prev) { /* case 2 */
1032 vma_start_write(curr);
1033 if (end == curr->vm_end) { /* case 7 */
1035 * can_vma_merge_after() assumed we would not be
1036 * removing prev vma, so it skipped the check
1037 * for vm_ops->close, but we are removing curr
1039 if (curr->vm_ops && curr->vm_ops->close)
1042 } else { /* case 5 */
1044 adj_start = (end - curr->vm_start);
1047 err = dup_anon_vma(prev, curr, &anon_dup);
1049 } else { /* merge_next */
1050 vma_start_write(next);
1052 if (prev && addr < prev->vm_end) { /* case 4 */
1053 vma_start_write(prev);
1056 adj_start = -(prev->vm_end - addr);
1057 err = dup_anon_vma(next, prev, &anon_dup);
1060 * Note that cases 3 and 8 are the ONLY ones where prev
1061 * is permitted to be (but is not necessarily) NULL.
1063 vma = next; /* case 3 */
1065 vma_end = next->vm_end;
1066 vma_pgoff = next->vm_pgoff - pglen;
1067 if (curr) { /* case 8 */
1068 vma_pgoff = curr->vm_pgoff;
1069 vma_start_write(curr);
1071 err = dup_anon_vma(next, curr, &anon_dup);
1076 /* Error in anon_vma clone. */
1080 if (vma_start < vma->vm_start || vma_end > vma->vm_end)
1081 vma_expanded = true;
1084 vma_iter_config(vmi, vma_start, vma_end);
1086 vma_iter_config(vmi, adjust->vm_start + adj_start,
1090 if (vma_iter_prealloc(vmi, vma))
1093 init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
1094 VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
1095 vp.anon_vma != adjust->anon_vma);
1098 vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
1099 vma_set_range(vma, vma_start, vma_end, vma_pgoff);
1102 vma_iter_store(vmi, vma);
1105 adjust->vm_start += adj_start;
1106 adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
1107 if (adj_start < 0) {
1108 WARN_ON(vma_expanded);
1109 vma_iter_store(vmi, next);
1113 vma_complete(&vp, vmi, mm);
1114 khugepaged_enter_vma(res, vm_flags);
1119 unlink_anon_vmas(anon_dup);
1122 vma_iter_set(vmi, addr);
 * We are about to modify one or more of a VMA's flags, policy, userfaultfd
1129 * context and anonymous VMA name within the range [start, end).
1131 * As a result, we might be able to merge the newly modified VMA range with an
1132 * adjacent VMA with identical properties.
1134 * If no merge is possible and the range does not span the entirety of the VMA,
1135 * we then need to split the VMA to accommodate the change.
1137 * The function returns either the merged VMA, the original VMA if a split was
1138 * required instead, or an error if the split failed.
1140 struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
1141 struct vm_area_struct *prev,
1142 struct vm_area_struct *vma,
1143 unsigned long start, unsigned long end,
1144 unsigned long vm_flags,
1145 struct mempolicy *policy,
1146 struct vm_userfaultfd_ctx uffd_ctx,
1147 struct anon_vma_name *anon_name)
1149 pgoff_t pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
1150 struct vm_area_struct *merged;
1152 merged = vma_merge(vmi, prev, vma, start, end, vm_flags,
1153 pgoff, policy, uffd_ctx, anon_name);
1157 if (vma->vm_start < start) {
1158 int err = split_vma(vmi, vma, start, 1);
1161 return ERR_PTR(err);
1164 if (vma->vm_end > end) {
1165 int err = split_vma(vmi, vma, end, 0);
1168 return ERR_PTR(err);
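/*
 * Illustrative sketch (not part of the original file): an mprotect-style
 * caller that only wants to change vm_flags over [start, end) keeps every
 * other property and lets vma_modify() decide between merging and splitting.
 * 'new_flags' is hypothetical:
 *
 *	vma = vma_modify(vmi, prev, vma, start, end, new_flags,
 *			 vma_policy(vma), vma->vm_userfaultfd_ctx,
 *			 anon_vma_name(vma));
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */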
1175 * Attempt to merge a newly mapped VMA with those adjacent to it. The caller
1176 * must ensure that [start, end) does not overlap any existing VMA.
1178 struct vm_area_struct
1179 *vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
1180 struct vm_area_struct *vma, unsigned long start,
1181 unsigned long end, pgoff_t pgoff)
1183 return vma_merge(vmi, prev, vma, start, end, vma->vm_flags, pgoff,
1184 vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma));
1188 * Expand vma by delta bytes, potentially merging with an immediately adjacent
1189 * VMA with identical properties.
1191 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1192 struct vm_area_struct *vma,
1193 unsigned long delta)
1195 pgoff_t pgoff = vma->vm_pgoff + vma_pages(vma);
1197 /* vma is specified as prev, so case 1 or 2 will apply. */
1198 return vma_merge(vmi, vma, vma, vma->vm_end, vma->vm_end + delta,
1199 vma->vm_flags, pgoff, vma_policy(vma),
1200 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
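/*
 * Illustrative sketch (not part of the original file): growing a mapping in
 * place by 'delta' bytes, e.g. from an mremap-style path, first tries to
 * merge the extension into the existing VMA. 'mm' and 'delta' are
 * hypothetical here:
 *
 *	VMA_ITERATOR(vmi, mm, vma->vm_end);
 *
 *	vma = vma_merge_extend(&vmi, vma, delta);
 *	if (!vma)
 *		... the extension could not be merged, map it separately ...
 */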
1203 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
1208 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
1210 struct address_space *mapping;
1213 mapping = vb->vmas[0]->vm_file->f_mapping;
1214 i_mmap_lock_write(mapping);
1215 for (i = 0; i < vb->count; i++) {
1216 VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
1217 __remove_shared_vm_struct(vb->vmas[i], mapping);
1219 i_mmap_unlock_write(mapping);
1221 unlink_file_vma_batch_init(vb);
1224 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
1225 struct vm_area_struct *vma)
1227 if (vma->vm_file == NULL)
1230 if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
1231 vb->count == ARRAY_SIZE(vb->vmas))
1232 unlink_file_vma_batch_process(vb);
1234 vb->vmas[vb->count] = vma;
1238 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
1241 unlink_file_vma_batch_process(vb);
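/*
 * Illustrative sketch (not part of the original file): the intended calling
 * protocol for the batch API above when tearing down many VMAs that may
 * share the same file:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 *
 * Batching amortises the i_mmap_rwsem acquisition across VMAs backed by the
 * same mapping.
 */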
1245 * Unlink a file-based vm structure from its interval tree, to hide
1246 * vma from rmap and vmtruncate before freeing its page tables.
1248 void unlink_file_vma(struct vm_area_struct *vma)
1250 struct file *file = vma->vm_file;
1253 struct address_space *mapping = file->f_mapping;
1255 i_mmap_lock_write(mapping);
1256 __remove_shared_vm_struct(vma, mapping);
1257 i_mmap_unlock_write(mapping);
1261 void vma_link_file(struct vm_area_struct *vma)
1263 struct file *file = vma->vm_file;
1264 struct address_space *mapping;
1267 mapping = file->f_mapping;
1268 i_mmap_lock_write(mapping);
1269 __vma_link_file(vma, mapping);
1270 i_mmap_unlock_write(mapping);
1274 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1276 VMA_ITERATOR(vmi, mm, 0);
1278 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1279 if (vma_iter_prealloc(&vmi, vma))
1282 vma_start_write(vma);
1283 vma_iter_store(&vmi, vma);
1291 * Copy the vma structure to a new location in the same mm,
1292 * prior to moving page table entries, to effect an mremap move.
1294 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1295 unsigned long addr, unsigned long len, pgoff_t pgoff,
1296 bool *need_rmap_locks)
1298 struct vm_area_struct *vma = *vmap;
1299 unsigned long vma_start = vma->vm_start;
1300 struct mm_struct *mm = vma->vm_mm;
1301 struct vm_area_struct *new_vma, *prev;
1302 bool faulted_in_anon_vma = true;
1303 VMA_ITERATOR(vmi, mm, addr);
1306 * If anonymous vma has not yet been faulted, update new pgoff
1307 * to match new location, to increase its chance of merging.
1309 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1310 pgoff = addr >> PAGE_SHIFT;
1311 faulted_in_anon_vma = false;
1314 new_vma = find_vma_prev(mm, addr, &prev);
1315 if (new_vma && new_vma->vm_start < addr + len)
1316 return NULL; /* should never get here */
1318 new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff);
1321 * Source vma may have been merged into new_vma
1323 if (unlikely(vma_start >= new_vma->vm_start &&
1324 vma_start < new_vma->vm_end)) {
1326 * The only way we can get a vma_merge with
1327 * self during an mremap is if the vma hasn't
1328 * been faulted in yet and we were allowed to
1329 * reset the dst vma->vm_pgoff to the
1330 * destination address of the mremap to allow
1331 * the merge to happen. mremap must change the
1332 * vm_pgoff linearity between src and dst vmas
1333 * (in turn preventing a vma_merge) to be
1334 * safe. It is only safe to keep the vm_pgoff
1335 * linear if there are no pages mapped yet.
1337 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1338 *vmap = vma = new_vma;
1340 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1342 new_vma = vm_area_dup(vma);
1345 vma_set_range(new_vma, addr, addr + len, pgoff);
1346 if (vma_dup_policy(vma, new_vma))
1348 if (anon_vma_clone(new_vma, vma))
1349 goto out_free_mempol;
1350 if (new_vma->vm_file)
1351 get_file(new_vma->vm_file);
1352 if (new_vma->vm_ops && new_vma->vm_ops->open)
1353 new_vma->vm_ops->open(new_vma);
1354 if (vma_link(mm, new_vma))
1356 *need_rmap_locks = false;
1361 if (new_vma->vm_ops && new_vma->vm_ops->close)
1362 new_vma->vm_ops->close(new_vma);
1364 if (new_vma->vm_file)
1365 fput(new_vma->vm_file);
1367 unlink_anon_vmas(new_vma);
1369 mpol_put(vma_policy(new_vma));
1371 vm_area_free(new_vma);
1377 * Rough compatibility check to quickly see if it's even worth looking
1378 * at sharing an anon_vma.
1380 * They need to have the same vm_file, and the flags can only differ
1381 * in things that mprotect may change.
1383 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1384 * we can merge the two vma's. For example, we refuse to merge a vma if
1385 * there is a vm_ops->close() function, because that indicates that the
1386 * driver is doing some kind of reference counting. But that doesn't
1387 * really matter for the anon_vma sharing case.
1389 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1391 return a->vm_end == b->vm_start &&
1392 mpol_equal(vma_policy(a), vma_policy(b)) &&
1393 a->vm_file == b->vm_file &&
1394 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1395 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1399 * Do some basic sanity checking to see if we can re-use the anon_vma
1400 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1401 * the same as 'old', the other will be the new one that is trying
1402 * to share the anon_vma.
1404 * NOTE! This runs with mmap_lock held for reading, so it is possible that
1405 * the anon_vma of 'old' is concurrently in the process of being set up
1406 * by another page fault trying to merge _that_. But that's ok: if it
1407 * is being set up, that automatically means that it will be a singleton
1408 * acceptable for merging, so we can do all of this optimistically. But
1409 * we do that READ_ONCE() to make sure that we never re-load the pointer.
1411 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1412 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
1416 * We also make sure that the two vma's are compatible (adjacent,
1417 * and with the same memory policies). That's all stable, even with just
1418 * a read lock on the mmap_lock.
1420 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
1421 struct vm_area_struct *a,
1422 struct vm_area_struct *b)
1424 if (anon_vma_compatible(a, b)) {
1425 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1427 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1434 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1435 * neighbouring vmas for a suitable anon_vma, before it goes off
1436 * to allocate a new anon_vma. It checks because a repetitive
1437 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent mprotects.
1441 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1443 struct anon_vma *anon_vma = NULL;
1444 struct vm_area_struct *prev, *next;
1445 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
1447 /* Try next first. */
1448 next = vma_iter_load(&vmi);
1450 anon_vma = reusable_anon_vma(next, vma, next);
1455 prev = vma_prev(&vmi);
1456 VM_BUG_ON_VMA(prev != vma, vma);
1457 prev = vma_prev(&vmi);
1458 /* Try prev next. */
1460 anon_vma = reusable_anon_vma(prev, prev, vma);
1463 * We might reach here with anon_vma == NULL if we can't find
1464 * any reusable anon_vma.
1465 * There's no absolute need to look only at touching neighbours:
1466 * we could search further afield for "compatible" anon_vmas.
1467 * But it would probably just be a waste of time searching,
1468 * or lead to too many vmas hanging off the same anon_vma.
1469 * We're trying to allow mprotect remerging later on,
1470 * not trying to minimize memory used for anon_vmas.
1475 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1477 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1480 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1482 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1483 (VM_WRITE | VM_SHARED);
1486 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1488 /* No managed pages to writeback. */
1489 if (vma->vm_flags & VM_PFNMAP)
1492 return vma->vm_file && vma->vm_file->f_mapping &&
1493 mapping_can_writeback(vma->vm_file->f_mapping);
 * Does this VMA require the underlying folios to have their dirty state
 * tracked?
1500 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1502 /* Only shared, writable VMAs require dirty tracking. */
1503 if (!vma_is_shared_writable(vma))
1506 /* Does the filesystem need to be notified? */
1507 if (vm_ops_needs_writenotify(vma->vm_ops))
1511 * Even if the filesystem doesn't indicate a need for writenotify, if it
1512 * can writeback, dirty tracking is still required.
1514 return vma_fs_can_writeback(vma);
1518 * Some shared mappings will want the pages marked read-only
1519 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the VM_SHARED bit).
1523 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1525 /* If it was private or non-writable, the write bit is already clear */
1526 if (!vma_is_shared_writable(vma))
1529 /* The backer wishes to know when pages are first written to? */
1530 if (vm_ops_needs_writenotify(vma->vm_ops))
1533 /* The open routine did something to the protections that pgprot_modify
1534 * won't preserve? */
1535 if (pgprot_val(vm_page_prot) !=
1536 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
 * Do we need to track softdirty? hugetlb does not support softdirty tracking yet.
1543 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1546 /* Do we need write faults for uffd-wp tracking? */
1547 if (userfaultfd_wp(vma))
1550 /* Can the mapping track the dirty pages? */
1551 return vma_fs_can_writeback(vma);
1554 unsigned long count_vma_pages_range(struct mm_struct *mm,
1555 unsigned long addr, unsigned long end)
1557 VMA_ITERATOR(vmi, mm, addr);
1558 struct vm_area_struct *vma;
1559 unsigned long nr_pages = 0;
1561 for_each_vma_range(vmi, vma, end) {
1562 unsigned long vm_start = max(addr, vma->vm_start);
1563 unsigned long vm_end = min(end, vma->vm_end);
1565 nr_pages += PHYS_PFN(vm_end - vm_start);
1571 static DEFINE_MUTEX(mm_all_locks_mutex);
1573 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
1575 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
1577 * The LSB of head.next can't change from under us
1578 * because we hold the mm_all_locks_mutex.
1580 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
1582 * We can safely modify head.next after taking the
1583 * anon_vma->root->rwsem. If some other vma in this mm shares
1584 * the same anon_vma we won't take it again.
1586 * No need of atomic instructions here, head.next
1587 * can't change from under us thanks to the
1588 * anon_vma->root->rwsem.
1590 if (__test_and_set_bit(0, (unsigned long *)
1591 &anon_vma->root->rb_root.rb_root.rb_node))
1596 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
1598 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
1600 * AS_MM_ALL_LOCKS can't change from under us because
1601 * we hold the mm_all_locks_mutex.
1603 * Operations on ->flags have to be atomic because
1604 * even if AS_MM_ALL_LOCKS is stable thanks to the
1605 * mm_all_locks_mutex, there may be other cpus
1606 * changing other bitflags in parallel to us.
1608 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
1610 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
1615 * This operation locks against the VM for all pte/vma/mm related
1616 * operations that could ever happen on a certain mm. This includes
1617 * vmtruncate, try_to_unmap, and all page faults.
1619 * The caller must take the mmap_lock in write mode before calling
1620 * mm_take_all_locks(). The caller isn't allowed to release the
1621 * mmap_lock until mm_drop_all_locks() returns.
1623 * mmap_lock in write mode is required in order to block all operations
1624 * that could modify pagetables and free pages without need of
1625 * altering the vma layout. It's also needed in write mode to avoid new
1626 * anon_vmas to be associated with existing vmas.
1628 * A single task can't take more than one mm_take_all_locks() in a row
1629 * or it would deadlock.
1631 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
1632 * mapping->flags avoid to take the same lock twice, if more than one
1633 * vma in this mm is backed by the same anon_vma or address_space.
 * We take locks in the following order, according to the comment at the
 * beginning of mm/rmap.c:
 *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *     hugetlb vmas)
 *   - all vmas marked locked
 *   - all i_mmap_rwsem locks
 *   - all anon_vma->rwsem locks
1643 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we're protected from parallel mm_take_all_locks() by
1645 * mm_all_locks_mutex.
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
1650 * mm_take_all_locks() can fail if it's interrupted by signals.
1652 int mm_take_all_locks(struct mm_struct *mm)
1654 struct vm_area_struct *vma;
1655 struct anon_vma_chain *avc;
1656 VMA_ITERATOR(vmi, mm, 0);
1658 mmap_assert_write_locked(mm);
1660 mutex_lock(&mm_all_locks_mutex);
1663 * vma_start_write() does not have a complement in mm_drop_all_locks()
1664 * because vma_start_write() is always asymmetrical; it marks a VMA as
 * being written to until mmap_write_unlock() or mmap_write_downgrade()
 * is called.
1668 for_each_vma(vmi, vma) {
1669 if (signal_pending(current))
1671 vma_start_write(vma);
1674 vma_iter_init(&vmi, mm, 0);
1675 for_each_vma(vmi, vma) {
1676 if (signal_pending(current))
1678 if (vma->vm_file && vma->vm_file->f_mapping &&
1679 is_vm_hugetlb_page(vma))
1680 vm_lock_mapping(mm, vma->vm_file->f_mapping);
1683 vma_iter_init(&vmi, mm, 0);
1684 for_each_vma(vmi, vma) {
1685 if (signal_pending(current))
1687 if (vma->vm_file && vma->vm_file->f_mapping &&
1688 !is_vm_hugetlb_page(vma))
1689 vm_lock_mapping(mm, vma->vm_file->f_mapping);
1692 vma_iter_init(&vmi, mm, 0);
1693 for_each_vma(vmi, vma) {
1694 if (signal_pending(current))
1697 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
1698 vm_lock_anon_vma(mm, avc->anon_vma);
1704 mm_drop_all_locks(mm);
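/*
 * Illustrative sketch (not part of the original file): the expected calling
 * pattern for a path that must quiesce the whole address space. The caller,
 * not this helper, owns the mmap write lock:
 *
 *	mmap_write_lock(mm);
 *	if (mm_take_all_locks(mm))
 *		goto out;                        (interrupted by a signal)
 *	... publish state that rmap walkers must observe ...
 *	mm_drop_all_locks(mm);
 * out:
 *	mmap_write_unlock(mm);
 */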
1708 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
1710 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
1712 * The LSB of head.next can't change to 0 from under
1713 * us because we hold the mm_all_locks_mutex.
1715 * We must however clear the bitflag before unlocking
1716 * the vma so the users using the anon_vma->rb_root will
1717 * never see our bitflag.
1719 * No need of atomic instructions here, head.next
1720 * can't change from under us until we release the
1721 * anon_vma->root->rwsem.
1723 if (!__test_and_clear_bit(0, (unsigned long *)
1724 &anon_vma->root->rb_root.rb_root.rb_node))
1726 anon_vma_unlock_write(anon_vma);
1730 static void vm_unlock_mapping(struct address_space *mapping)
1732 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
1734 * AS_MM_ALL_LOCKS can't change to 0 from under us
1735 * because we hold the mm_all_locks_mutex.
1737 i_mmap_unlock_write(mapping);
1738 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
1745 * The mmap_lock cannot be released by the caller until
1746 * mm_drop_all_locks() returns.
1748 void mm_drop_all_locks(struct mm_struct *mm)
1750 struct vm_area_struct *vma;
1751 struct anon_vma_chain *avc;
1752 VMA_ITERATOR(vmi, mm, 0);
1754 mmap_assert_write_locked(mm);
1755 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
1757 for_each_vma(vmi, vma) {
1759 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
1760 vm_unlock_anon_vma(avc->anon_vma);
1761 if (vma->vm_file && vma->vm_file->f_mapping)
1762 vm_unlock_mapping(vma->vm_file->f_mapping);
1765 mutex_unlock(&mm_all_locks_mutex);