/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;	/* The first vma to munmap */
	struct vm_area_struct *prev;	/* vma before the munmap area */
	struct vm_area_struct *next;	/* vma after the munmap area */
	struct list_head *uf;		/* Userfaultfd list_head */
	unsigned long start;		/* Aligned start addr (inclusive) */
	unsigned long end;		/* Aligned end addr (exclusive) */
	unsigned long unmap_start;	/* Unmap PTE start */
	unsigned long unmap_end;	/* Unmap PTE end */
	int vma_count;			/* Number of vmas that will be removed */
	bool unlock;			/* Unlock after the munmap */
	bool clear_ptes;		/* If there are outstanding PTEs to be cleared */
	bool closed_vm_ops;		/* call_mmap() was encountered, so vmas may be closed */
	/* 1 byte hole */
	unsigned long nr_pages;		/* Number of pages being removed */
	unsigned long locked_vm;	/* Number of locked pages */
	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	pgoff_t pgoff;
	struct vm_area_struct *prev;
	struct vm_area_struct *next;	/* Modified by vma_merge(). */
	struct vm_area_struct *vma;	/* Either a new VMA or the one being modified. */
	unsigned long start;
	unsigned long end;
	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
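
/*
 * Worked example (illustrative values, not from the original header): for a
 * file-backed VMA with vm_start == 0x7f0000000000 and vm_pgoff == 0x10,
 *
 *	vma_pgoff_offset(vma, vma->vm_start + 2 * PAGE_SIZE) == 0x12
 *
 * i.e. the page offset into the backing file of the address two pages past
 * the start of the mapping.
 */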

#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}
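
/*
 * Usage sketch for the state macros (hedged; the callers live in vma.c and
 * the variable names here are illustrative). Describing a fresh mapping of
 * [addr, addr + len) and testing whether the merge machinery ran out of
 * memory:
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vm_flags, pgoff);
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma && vmg_nomem(&vmg))
 *		return -ENOMEM;
 *
 * VMG_VMA_STATE() is the analogue for modifying an existing VMA: it seeds
 * the descriptor from @vma_'s current attributes so callers only override
 * the fields they intend to change.
 */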

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

/* Required for expand_downwards(). */
void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma);

/* Required for expand_downwards(). */
void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);

int vma_expand(struct vma_merge_struct *vmg);
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
				     struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}
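
/*
 * Illustrative call (hedged): inserting @vma over its own range with a
 * sleepable allocation; maple tree node allocation is the only failure
 * mode surfaced here:
 *
 *	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
 *		return -ENOMEM;
 */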

/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation.  Only unlocked on success
 */
static inline void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
	vms->closed_vm_ops = false;
}

int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach);
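
/*
 * Sketch of the overall munmap flow these entry points implement (hedged;
 * condensed from do_vmi_align_munmap() in vma.c, error paths abbreviated):
 *
 *	struct maple_tree mt_detach;
 *	MA_STATE(mas_detach, &mt_detach, 0, 0);
 *	struct vma_munmap_struct vms;
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *	if (error)
 *		goto gather_failed;
 *	... clear [start, end) in the mm's maple tree ...
 *	vms_complete_munmap_vmas(&vms, &mas_detach);	// point of no return
 *
 * On failure after gathering, reattach_vmas() or vms_abort_munmap_vmas()
 * below roll back as much as possible.
 */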

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static inline void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_detached(vma, false);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
 * operation.
 * @vms: The vma unmap structure
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas, free up the maple tree used to track the vmas.
 * If that's not possible because the ptes are cleared (and vm_ops->close() may
 * have been called), then a NULL is written over the vmas and the vmas are
 * removed (munmap() completed).
 */
static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct ma_state *mas = &vms->vmi->mas;

	if (!vms->nr_pages)
		return;

	if (vms->clear_ptes)
		return reattach_vmas(mas_detach);

	/*
	 * Aborting cannot just call the vm_ops open() because they are often
	 * not symmetrical and state data has been lost.  Resort to the old
	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
	 */
	mas_set_range(mas, vms->start, vms->end - 1);
	if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
		pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
			     current->comm, current->pid);
		/* Leaving vmas detached and in-tree may hamper recovery */
		reattach_vmas(mas_detach);
	} else {
		/* Clean up the insertion of the unfortunate gap */
		vms_complete_munmap_vmas(vms, mas_detach);
	}
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* We are about to modify the VMA's flags. */
struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx);

struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);
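
/*
 * Sketch (hedged) of the batch pattern these helpers support: up to eight
 * file-backed VMAs are unlinked from their address_space per lock
 * acquisition, amortizing i_mmap locking on teardown paths:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for (...each file-backed vma being torn down...)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */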

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
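
/*
 * Worked examples (illustrative): a text segment mapped VM_READ | VM_EXEC
 * satisfies is_exec_mapping(); a private VM_READ | VM_WRITE heap VMA
 * satisfies is_data_mapping(); a MAP_SHARED writable mapping satisfies
 * neither, since VM_SHARED excludes it from the data test.
 */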

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}
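
/*
 * Sketch (hedged, mirroring the unmapped-area search in mmap.c): find the
 * lowest gap of @length bytes within [low, high), then read back its start:
 *
 *	if (vma_iter_area_lowest(&vmi, low, high, length))
 *		return -ENOMEM;
 *	gap_start = vma_iter_addr(&vmi);
 */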

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
				    struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to end of the previous VMA, or
 * if no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
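
/*
 * Usage sketch (illustrative, not a verbatim caller): position the iterator
 * at @addr, then fetch the following VMA while learning its predecessor in
 * one step:
 *
 *	vma_iter_set(&vmi, addr);
 *	next = vma_iter_next_rewind(&vmi, &prev);
 *
 * Afterwards the iterator is rewound to the end of @prev's range (or to
 * index 0 when there is no previous VMA).
 */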

#ifdef CONFIG_64BIT
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check if a vma is sealed for modification.
 * Return true, if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);
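
/*
 * Usage sketch (hedged): callers on the munmap/mprotect/madvise paths are
 * expected to bail out before touching a sealed VMA, e.g.:
 *
 *	if (!can_modify_vma(vma))
 *		return -EPERM;
 */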

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#endif	/* __MM_VMA_H */