// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	unsigned long vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

/*
 * check_brk_limits() - Use platform specific check of range & verify mlock
 * limits.
 * @addr: The address to check
 * @len: The size of increase.
 *
 * Return: 0 on success.
 */
static int check_brk_limits(unsigned long addr, unsigned long len)
{
	unsigned long mapped_addr;

	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (IS_ERR_VALUE(mapped_addr))
		return mapped_addr;

	return mlock_future_ok(current->mm, current->mm->def_flags, len)
		? 0 : -EAGAIN;
}
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		unsigned long addr, unsigned long request, unsigned long flags);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *brkvma, *next = NULL;
	unsigned long min_brk;
	bool populate = false;
	LIST_HEAD(uf);
	struct vma_iterator vmi;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the
	 * test of oldbrk with newbrk then it can escape the test and let the
	 * data segment grow beyond its set limit in the case where the limit
	 * is not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		/* Search one past newbrk */
		vma_iter_init(&vmi, mm, newbrk);
		brkvma = vma_find(&vmi, oldbrk);
		if (!brkvma || brkvma->vm_start >= oldbrk)
			goto out; /* mapping intersects with an existing non-brk vma. */
		/*
		 * mm->brk must be protected by write mmap_lock.
		 * do_vmi_align_munmap() will drop the lock on success, so
		 * update it before calling do_vmi_align_munmap().
		 */
		mm->brk = brk;
		if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
					/* unlock = */ true))
			goto out;

		goto success_unlocked;
	}

	if (check_brk_limits(oldbrk, newbrk - oldbrk))
		goto out;

	/*
	 * Only check if the next VMA is within the stack_guard_gap of the
	 * expansion area
	 */
	vma_iter_init(&vmi, mm, oldbrk);
	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	brkvma = vma_prev_limit(&vmi, mm->start_brk);
	/* Ok, looks good - let it rip. */
	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
		goto out;

	mm->brk = brk;
	if (mm->def_flags & VM_LOCKED)
		populate = true;

success:
	mmap_write_unlock(mm);
success_unlocked:
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	mm->brk = origbrk;
	mmap_write_unlock(mm);
	return origbrk;
}

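/*
 * Illustrative userspace sketch (hypothetical caller, not part of this
 * file): unlike the glibc wrapper, the raw syscall returns the new break
 * on success and the unchanged old break on failure, so callers compare
 * values instead of testing for -1:
 *
 *	unsigned long cur = syscall(SYS_brk, 0);	// 0 < min_brk, so the old break comes back
 *	unsigned long req = cur + 4096;
 *	if (syscall(SYS_brk, req) != req)
 *		... the kernel refused the expansion ...
 */
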
/*
 * If a hint addr is less than mmap_min_addr, change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}

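/*
 * Worked example (assuming 4 KiB pages and mmap_min_addr == 0x10000):
 * a hint of 0x4321 is masked to 0x4000, falls below mmap_min_addr, and
 * comes back as PAGE_ALIGN(0x10000) == 0x10000; a NULL hint is passed
 * through so the kernel remains free to pick any address.
 */
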
bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
		     unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

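/*
 * Worked example: with the common RLIMIT_MEMLOCK default of 64 KiB and
 * 4 KiB pages, limit_pages is 16; a task with 12 pages already in
 * mm->locked_vm can VM_LOCKED-map at most 4 more pages (16 KiB) here,
 * unless it holds CAP_IPC_LOCK, which bypasses the limit entirely.
 */
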
static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISBLK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISSOCK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	/* Special "we do even unsigned file positions" case */
	if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
		return 0;

	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
	return ULONG_MAX;
}

static inline bool file_mmap_ok(struct file *file, struct inode *inode,
				unsigned long pgoff, unsigned long len)
{
	u64 maxsize = file_mmap_size_max(file, inode);

	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	if (pgoff > maxsize >> PAGE_SHIFT)
		return false;
	return true;
}

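/*
 * Worked example (regular file, 4 KiB pages): maxsize is
 * MAX_LFS_FILESIZE. After maxsize -= len, only (maxsize - len) pages of
 * headroom remain, so any pgoff beyond that is rejected here instead of
 * wrapping around in the fault path. For FOP_UNSIGNED_OFFSET files
 * maxsize is 0, the length test is skipped, and the unsigned underflow
 * in maxsize -= len deliberately leaves an effectively unlimited pgoff
 * range.
 */
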
/*
 * The caller must write-lock current->mm->mmap_lock.
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, vm_flags_t vm_flags,
			unsigned long pgoff, unsigned long *populate,
			struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	int pkey = 0;

	*populate = 0;

	if (!len)
		return -EINVAL;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && path_noexec(&file->f_path)))
			prot |= PROT_EXEC;

	/* force arch specific MAP_FIXED handling in get_unmapped_area */
	if (flags & MAP_FIXED_NOREPLACE)
		flags |= MAP_FIXED;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/*
	 * addr is returned from get_unmapped_area,
	 * There are two cases:
	 * 1> MAP_FIXED == false
	 *	unallocated memory, no need to check sealing.
	 * 2> MAP_FIXED == true
	 *	sealing is checked inside mmap_region when
	 *	do_vmi_munmap is called.
	 */

	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(mm);
		if (pkey < 0)
			pkey = 0;
	}

	/* Do simple checking here so the lower-level routines won't have
	 * to. We assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (flags & MAP_FIXED_NOREPLACE) {
		if (find_vma_intersection(mm, addr, addr + len))
			return -EEXIST;
	}

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	if (!mlock_future_ok(mm, vm_flags, len))
		return -EAGAIN;

	if (file) {
		struct inode *inode = file_inode(file);
		unsigned long flags_mask;

		if (!file_mmap_ok(file, inode, pgoff, len))
			return -EOVERFLOW;

		flags_mask = LEGACY_MAP_MASK;
		if (file->f_op->fop_flags & FOP_MMAP_SYNC)
			flags_mask |= MAP_SYNC;

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/*
			 * Force use of MAP_SHARED_VALIDATE with non-legacy
			 * flags. E.g. MAP_SYNC is dangerous to use with
			 * MAP_SHARED as you don't know which consistency model
			 * you will get. We silently ignore unsupported flags
			 * with MAP_SHARED to preserve backward compatibility.
			 */
			flags &= LEGACY_MAP_MASK;
			fallthrough;
		case MAP_SHARED_VALIDATE:
			if (flags & ~flags_mask)
				return -EOPNOTSUPP;
			if (prot & PROT_WRITE) {
				if (!(file->f_mode & FMODE_WRITE))
					return -EACCES;
				if (IS_SWAPFILE(file->f_mapping->host))
					return -ETXTBSY;
			}

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
			fallthrough;
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (path_noexec(&file->f_path)) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op->mmap)
				return -ENODEV;
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_DROPPABLE:
			if (VM_DROPPABLE == VM_NONE)
				return -ENOTSUPP;
			/*
			 * A locked or stack area makes no sense to be droppable.
			 *
			 * Also, since droppable pages can just go away at any time
			 * it makes no sense to copy them on fork or dump them.
			 *
			 * And don't attempt to combine with hugetlb for now.
			 */
			if (flags & (MAP_LOCKED | MAP_HUGETLB))
				return -EINVAL;
			if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
				return -EINVAL;

			vm_flags |= VM_DROPPABLE;

			/*
			 * If the pages can be dropped, then it doesn't make
			 * sense to reserve them.
			 */
			vm_flags |= VM_NORESERVE;

			/*
			 * Likewise, they're volatile enough that they
			 * shouldn't survive forks or coredumps.
			 */
			vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
			fallthrough;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}
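
/*
 * Illustrative sketch of the *populate contract (modeled on the
 * vm_mmap_pgoff()-style wrappers that call this): do_mmap() only records
 * how much to prefault, and the caller faults it in after dropping the
 * lock:
 *
 *	unsigned long populate = 0;
 *	unsigned long ret;
 *
 *	mmap_write_lock(mm);
 *	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff, &populate, &uf);
 *	mmap_write_unlock(mm);
 *	if (!IS_ERR_VALUE(ret) && populate)
 *		mm_populate(ret, populate);	// MAP_POPULATE / VM_LOCKED ranges
 */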

unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			return -EBADF;
		if (is_file_hugepages(file)) {
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		} else if (unlikely(flags & MAP_HUGETLB)) {
			retval = -EINVAL;
			goto out_fput;
		}
	} else if (flags & MAP_HUGETLB) {
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	if (file)
		fput(file);
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

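/*
 * Note the unit conversion above: old_mmap takes a byte offset while
 * ksys_mmap_pgoff wants a page offset. Worked example with 4 KiB pages:
 * a.offset == 0x3000 passes the offset_in_page() check and becomes
 * pgoff == 3, while a.offset == 0x3001 fails with -EINVAL before any
 * mapping work is done.
 */
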
/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return false;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

/**
 * unmapped_area() - Find an area between the low_limit and the high_limit with
 * the correct alignment and offset, all from @info. Note: current->mm is used
 * for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	/*
	 * Adjust for the gap first so it doesn't interfere with the
	 * later alignment. The first step is the minimum needed to
	 * fulfill the start gap, the next step is the minimum to align
	 * that. It is the minimum needed to fulfill both.
	 */
	gap = vma_iter_addr(&vmi) + info->start_gap;
	gap += (info->align_offset - gap) & info->align_mask;
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap + length - 1) {
			low_limit = tmp->vm_end;
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			low_limit = vm_end_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}

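/*
 * Worked example of the gap arithmetic above: say the iterator found
 * free space at 0x10000 with info->start_gap == 0x1000,
 * info->align_mask == 0xffff (64 KiB alignment) and align_offset == 0.
 * Then gap becomes 0x11000, and (0 - 0x11000) & 0xffff == 0xf000 bumps
 * it to 0x20000: the lowest address satisfying both the start gap and
 * the alignment.
 */
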
/**
 * unmapped_area_topdown() - Find an area between the low_limit and the
 * high_limit with the correct alignment and offset at the highest available
 * address, all from @info. Note: current->mm is used for the search.
 *
 * @info: The unmapped area information including the range [low_limit -
 * high_limit), the alignment offset and mask.
 *
 * Return: A memory address or -ENOMEM.
 */
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
	unsigned long length, gap, gap_end;
	unsigned long low_limit, high_limit;
	struct vm_area_struct *tmp;
	VMA_ITERATOR(vmi, current->mm, 0);

	/* Adjust search length to account for worst case alignment overhead */
	length = info->length + info->align_mask + info->start_gap;
	if (length < info->length)
		return -ENOMEM;

	low_limit = info->low_limit;
	if (low_limit < mmap_min_addr)
		low_limit = mmap_min_addr;
	high_limit = info->high_limit;
retry:
	if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
		return -ENOMEM;

	gap = vma_iter_end(&vmi) - info->length;
	gap -= (gap - info->align_offset) & info->align_mask;
	gap_end = vma_iter_end(&vmi);
	tmp = vma_next(&vmi);
	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
		if (vm_start_gap(tmp) < gap_end) {
			high_limit = vm_start_gap(tmp);
			vma_iter_reset(&vmi);
			goto retry;
		}
	} else {
		tmp = vma_prev(&vmi);
		if (tmp && vm_end_gap(tmp) > gap) {
			high_limit = tmp->vm_start;
			vma_iter_reset(&vmi);
			goto retry;
		}
	}

	return gap;
}

/*
 * Determine if the allocation needs to ensure that there is no
 * existing mapping within its guard gaps, for use as start_gap.
 */
static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
{
	if (vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */
unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long addr;

	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		addr = unmapped_area_topdown(info);
	else
		addr = unmapped_area(info);

	trace_vm_unmapped_area(addr, info);
	return addr;
}

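/*
 * Illustrative caller sketch (hypothetical values; the real callers are
 * the generic_get_unmapped_area*() helpers below). To place a 2 MiB
 * range, 2 MiB-aligned, somewhere in [TASK_UNMAPPED_BASE, TASK_SIZE):
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = SZ_2M;
 *	info.low_limit = TASK_UNMAPPED_BASE;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = SZ_2M - 1;		// begin_addr & (SZ_2M - 1) == 0
 *	addr = vm_unmapped_area(&info);		// an address, or -ENOMEM
 */
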
/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct vm_unmapped_area_info info = {};
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = mmap_end;
	info.start_gap = stack_guard_placement(vm_flags);
	return vm_unmapped_area(&info);
}

#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags, vm_flags_t vm_flags)
{
	return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
					 vm_flags);
}
#endif

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info = {};
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	/* requested length too big for entire address space */
	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
	info.start_gap = stack_guard_placement(vm_flags);
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = mmap_end;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags, vm_flags_t vm_flags)
{
	return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
						 vm_flags);
}
#endif

unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
					   unsigned long addr, unsigned long len,
					   unsigned long pgoff, unsigned long flags,
					   vm_flags_t vm_flags)
{
	if (test_bit(MMF_TOPDOWN, &mm->flags))
		return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
						      flags, vm_flags);
	return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
}

unsigned long
__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long)
				  = NULL;

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (file) {
		if (file->f_op->get_unmapped_area)
			get_area = file->f_op->get_unmapped_area;
	} else if (flags & MAP_SHARED) {
		/*
		 * mmap_region() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge.
		 */
		get_area = shmem_get_unmapped_area;
	}

	/* Always treat pgoff as zero for anonymous memory. */
	if (!file)
		pgoff = 0;

	if (get_area) {
		addr = get_area(file, addr, len, pgoff, flags);
	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		/* Ensures that larger anonymous mappings are THP aligned. */
		addr = thp_get_unmapped_area_vmflags(file, addr, len,
						     pgoff, flags, vm_flags);
	} else {
		addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
						    pgoff, flags, vm_flags);
	}
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (offset_in_page(addr))
		return -EINVAL;

	error = security_mmap_addr(addr);
	return error ? error : addr;
}

unsigned long
mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
		     unsigned long addr, unsigned long len,
		     unsigned long pgoff, unsigned long flags)
{
	if (test_bit(MMF_TOPDOWN, &mm->flags))
		return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0);
	return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
EXPORT_SYMBOL(mm_get_unmapped_area);

/**
 * find_vma_intersection() - Look up the first VMA which intersects the interval
 * @mm: The process address space.
 * @start_addr: The inclusive start user address.
 * @end_addr: The exclusive end user address.
 *
 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
 * start_addr < end_addr.
 */
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
					     unsigned long start_addr,
					     unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);

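/*
 * Illustrative usage sketch (assumed caller): the lookup helpers here
 * require the mmap lock, and each query is a single maple tree walk:
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma_intersection(mm, addr, addr + len);
 *	if (vma)
 *		... [addr, addr + len) overlaps an existing mapping ...
 *	mmap_read_unlock(mm);
 */
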
/**
 * find_vma() - Find the VMA for a given address, or the next VMA.
 * @mm: The mm_struct to check
 * @addr: The address
 *
 * Returns: The VMA associated with addr, or the next VMA.
 * May return %NULL in the case of no VMA at addr or above.
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	unsigned long index = addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, ULONG_MAX);
}
EXPORT_SYMBOL(find_vma);

/**
 * find_vma_prev() - Find the VMA for a given address, or the next vma and
 * set %pprev to the previous VMA, if any.
 * @mm: The mm_struct to check
 * @addr: The address
 * @pprev: The pointer to set to the previous VMA
 *
 * Note that RCU lock is missing here since the external mmap_lock() is used
 * instead.
 *
 * Returns: The VMA associated with @addr, or the next vma.
 * May return %NULL in the case of no vma at addr or above.
 */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

/*
 * Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
static int acct_stack_growth(struct vm_area_struct *vma,
			     unsigned long size, unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long new_start;

	/* address space limit tests */
	if (!may_expand_vm(mm, vma->vm_flags, grow))
		return -ENOMEM;

	/* Stack limit test */
	if (size > rlimit(RLIMIT_STACK))
		return -ENOMEM;

	/* mlock limit tests */
	if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
		return -ENOMEM;

	/* Check to ensure the stack will not grow into a hugetlb-only region */
	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
			vma->vm_end - size;
	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
		return -EFAULT;

	/*
	 * Overcommit..  This must be the final test, as it will
	 * update security statistics.
	 */
	if (security_vm_enough_memory_mm(mm, grow))
		return -ENOMEM;

	return 0;
}

#if defined(CONFIG_STACK_GROWSUP)
/*
 * PA-RISC uses this for its stack.
 * vma is the last one with address > vma->vm_end.  Have to extend vma.
 */
static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next;
	unsigned long gap_addr;
	int error = 0;
	VMA_ITERATOR(vmi, mm, vma->vm_start);

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/* Guard against exceeding limits of the address space. */
	address &= PAGE_MASK;
	if (address >= (TASK_SIZE & PAGE_MASK))
		return -ENOMEM;
	address += PAGE_SIZE;

	/* Enforce stack_guard_gap */
	gap_addr = address + stack_guard_gap;

	/* Guard against overflow */
	if (gap_addr < address || gap_addr > TASK_SIZE)
		gap_addr = TASK_SIZE;

	next = find_vma_intersection(mm, vma->vm_end, gap_addr);
	if (next && vma_is_accessible(next)) {
		if (!(next->vm_flags & VM_GROWSUP))
			return -ENOMEM;
		/* Check that both stack segments have the same anon_vma? */
	}

	if (next)
		vma_iter_prev_range_limit(&vmi, address);

	vma_iter_config(&vmi, vma->vm_start, address);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma))) {
		vma_iter_free(&vmi);
		return -ENOMEM;
	}

	/* Lock the VMA before expanding to prevent concurrent page faults */
	vma_start_write(vma);
	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_lock in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address > vma->vm_end) {
		unsigned long size, grow;

		size = address - vma->vm_start;
		grow = (address - vma->vm_end) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * We only hold a shared mmap_lock lock here, so
				 * we need to protect against concurrent vma
				 * expansions.  anon_vma_lock_write() doesn't
				 * help here, as we don't guarantee that all
				 * growable vmas in a mm share the same root
				 * anon vma.  So, we reuse mm->page_table_lock
				 * to guard against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_end = address;
				/* Overwrite old entry in mtree. */
				vma_iter_store(&vmi, vma);
				anon_vma_interval_tree_post_update_vma(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	vma_iter_free(&vmi);
	validate_mm(mm);
	return error;
}
#endif /* CONFIG_STACK_GROWSUP */

/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma.
 * mmap_lock held for writing.
 */
int expand_downwards(struct vm_area_struct *vma, unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *prev;
	int error = 0;
	VMA_ITERATOR(vmi, mm, vma->vm_start);

	if (!(vma->vm_flags & VM_GROWSDOWN))
		return -EFAULT;

	address &= PAGE_MASK;
	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
		return -EPERM;

	/* Enforce stack_guard_gap */
	prev = vma_prev(&vmi);
	/* Check that both stack segments have the same anon_vma? */
	if (prev) {
		if (!(prev->vm_flags & VM_GROWSDOWN) &&
		    vma_is_accessible(prev) &&
		    (address - prev->vm_end < stack_guard_gap))
			return -ENOMEM;
	}

	if (prev)
		vma_iter_next_range_limit(&vmi, vma->vm_start);

	vma_iter_config(&vmi, address, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	/* We must make sure the anon_vma is allocated. */
	if (unlikely(anon_vma_prepare(vma))) {
		vma_iter_free(&vmi);
		return -ENOMEM;
	}

	/* Lock the VMA before expanding to prevent concurrent page faults */
	vma_start_write(vma);
	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_lock in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	anon_vma_lock_write(vma->anon_vma);

	/* Somebody else might have raced and expanded it already */
	if (address < vma->vm_start) {
		unsigned long size, grow;

		size = vma->vm_end - address;
		grow = (vma->vm_start - address) >> PAGE_SHIFT;

		error = -ENOMEM;
		if (grow <= vma->vm_pgoff) {
			error = acct_stack_growth(vma, size, grow);
			if (!error) {
				/*
				 * We only hold a shared mmap_lock lock here, so
				 * we need to protect against concurrent vma
				 * expansions.  anon_vma_lock_write() doesn't
				 * help here, as we don't guarantee that all
				 * growable vmas in a mm share the same root
				 * anon vma.  So, we reuse mm->page_table_lock
				 * to guard against concurrent vma expansions.
				 */
				spin_lock(&mm->page_table_lock);
				if (vma->vm_flags & VM_LOCKED)
					mm->locked_vm += grow;
				vm_stat_account(mm, vma->vm_flags, grow);
				anon_vma_interval_tree_pre_update_vma(vma);
				vma->vm_start = address;
				vma->vm_pgoff -= grow;
				/* Overwrite old entry in mtree. */
				vma_iter_store(&vmi, vma);
				anon_vma_interval_tree_post_update_vma(vma);
				spin_unlock(&mm->page_table_lock);

				perf_event_mmap(vma);
			}
		}
	}
	anon_vma_unlock_write(vma->anon_vma);
	vma_iter_free(&vmi);
	validate_mm(mm);
	return error;
}

/* enforced gap between the expanding stack and other mappings. */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;

static int __init cmdline_parse_stack_guard_gap(char *p)
{
	unsigned long val;
	char *endptr;

	val = simple_strtoul(p, &endptr, 10);
	if (!*endptr)
		stack_guard_gap = val << PAGE_SHIFT;

	return 1;
}
__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);

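/*
 * Usage note: the boot parameter value is in pages, so with 4 KiB pages
 * "stack_guard_gap=256" keeps the default 1 MiB gap and
 * "stack_guard_gap=1" shrinks it to a single page. Any trailing
 * non-digit makes the !*endptr test fail and the setting is ignored.
 */
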
#ifdef CONFIG_STACK_GROWSUP
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
{
	return expand_upwards(vma, address);
}

struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	if (!prev)
		return NULL;
	if (expand_stack_locked(prev, addr))
		return NULL;
	if (prev->vm_flags & VM_LOCKED)
		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
	return prev;
}
#else
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
{
	return expand_downwards(vma, address);
}

struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	start = vma->vm_start;
	if (expand_stack_locked(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED)
		populate_vma_page_range(vma, addr, start, NULL);
	return vma;
}
#endif

#if defined(CONFIG_STACK_GROWSUP)

#define vma_expand_up(vma,addr) expand_upwards(vma, addr)
#define vma_expand_down(vma, addr) (-EFAULT)

#else

#define vma_expand_up(vma,addr) (-EFAULT)
#define vma_expand_down(vma, addr) expand_downwards(vma, addr)

#endif

/*
 * expand_stack(): legacy interface for page faulting. Don't use unless
 * you have to.
 *
 * This is called with the mm locked for reading, drops the lock, takes
 * the lock for writing, tries to look up a vma again, expands it if
 * necessary, and downgrades the lock to reading again.
 *
 * If no vma is found or it can't be expanded, it returns NULL and has
 * dropped the lock.
 */
struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	mmap_read_unlock(mm);
	if (mmap_write_lock_killable(mm))
		return NULL;

	vma = find_vma_prev(mm, addr, &prev);
	if (vma && vma->vm_start <= addr)
		goto success;

	if (prev && !vma_expand_up(prev, addr)) {
		vma = prev;
		goto success;
	}

	if (vma && !vma_expand_down(vma, addr))
		goto success;

	mmap_write_unlock(mm);
	return NULL;

success:
	mmap_write_downgrade(mm);
	return vma;
}

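/*
 * Illustrative fault-path sketch (assumed caller; the hypothetical
 * bad_area() stands in for an architecture's error path). The caller
 * enters with the mmap lock held for reading and must not release it
 * again on failure, since expand_stack() has already dropped it:
 *
 *	mmap_read_lock(mm);
 *	vma = expand_stack(mm, fault_address);
 *	if (!vma)
 *		return bad_area();	// lock already dropped by expand_stack()
 *	... handle the fault under the read lock, then ...
 *	mmap_read_unlock(mm);
 */
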
/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length to be munmapped.
 * @uf: The userfaultfd list_head
 *
 * Return: 0 on success, error otherwise.
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
	      struct list_head *uf)
{
	VMA_ITERATOR(vmi, mm, start);

	return do_vmi_munmap(&vmi, mm, start, len, uf, false);
}

e99668a5 LH |
1360 | unsigned long mmap_region(struct file *file, unsigned long addr, |
1361 | unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, | |
1362 | struct list_head *uf) | |
1363 | { | |
1364 | struct mm_struct *mm = current->mm; | |
1365 | struct vm_area_struct *vma = NULL; | |
5972d97c | 1366 | pgoff_t pglen = PHYS_PFN(len); |
cacded5e | 1367 | struct vm_area_struct *merge; |
e99668a5 | 1368 | unsigned long charged = 0; |
9014b230 LH |
1369 | struct vma_munmap_struct vms; |
1370 | struct ma_state mas_detach; | |
1371 | struct maple_tree mt_detach; | |
e99668a5 | 1372 | unsigned long end = addr + len; |
15897894 | 1373 | bool writable_file_mapping = false; |
f8d112a4 | 1374 | int error = -ENOMEM; |
183654ce | 1375 | VMA_ITERATOR(vmi, mm, addr); |
2f1c6611 | 1376 | VMG_STATE(vmg, mm, &vmi, addr, end, vm_flags, pgoff); |
e99668a5 | 1377 | |
2f1c6611 | 1378 | vmg.file = file; |
c7c0c3c3 LH |
1379 | /* Find the first overlapping VMA */ |
1380 | vma = vma_find(&vmi, end); | |
58e60f82 | 1381 | init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false); |
c7c0c3c3 | 1382 | if (vma) { |
9014b230 LH |
1383 | mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); |
1384 | mt_on_stack(mt_detach); | |
1385 | mas_init(&mas_detach, &mt_detach, /* addr = */ 0); | |
9014b230 LH |
1386 | /* Prepare to unmap any existing mapping in the area */ |
1387 | error = vms_gather_munmap_vmas(&vms, &mas_detach); | |
c7c0c3c3 | 1388 | if (error) |
d744f4ac | 1389 | goto gather_failed; |
e99668a5 | 1390 | |
cacded5e LS |
1391 | vmg.next = vms.next; |
1392 | vmg.prev = vms.prev; | |
c7c0c3c3 | 1393 | vma = NULL; |
9014b230 | 1394 | } else { |
cacded5e | 1395 | vmg.next = vma_iter_next_rewind(&vmi, &vmg.prev); |
e99668a5 LH |
1396 | } |
1397 | ||
224c1c70 LH |
1398 | /* Check against address space limit. */ |
1399 | if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages)) | |
1400 | goto abort_munmap; | |
e99668a5 LH |
1401 | |
1402 | /* | |
1403 | * Private writable mapping: check memory availability | |
1404 | */ | |
1405 | if (accountable_mapping(file, vm_flags)) { | |
5972d97c | 1406 | charged = pglen; |
13d77e01 LH |
1407 | charged -= vms.nr_accounted; |
1408 | if (charged && security_vm_enough_memory_mm(mm, charged)) | |
f8d112a4 | 1409 | goto abort_munmap; |
e99668a5 | 1410 | |
f8d112a4 | 1411 | vms.nr_accounted = 0; |
e99668a5 | 1412 | vm_flags |= VM_ACCOUNT; |
2f1c6611 | 1413 | vmg.flags = vm_flags; |
e99668a5 LH |
1414 | } |
1415 | ||
cacded5e LS |
1416 | vma = vma_merge_new_range(&vmg); |
1417 | if (vma) | |
e99668a5 | 1418 | goto expanded; |
e99668a5 LH |
1419 | /* |
1420 | * Determine the object being mapped and call the appropriate | |
1421 | * specific mapper. The address has already been validated but | |
1422 | * not unmapped; the maps, however, are removed from the list. | |
1423 | */ | |
1424 | vma = vm_area_alloc(mm); | |
f8d112a4 | 1425 | if (!vma) |
e99668a5 | 1426 | goto unacct_error; |
e99668a5 | 1427 | |
53bee98d | 1428 | vma_iter_config(&vmi, addr, end); |
412c6ef9 | 1429 | vma_set_range(vma, addr, end, pgoff); |
1c71222e | 1430 | vm_flags_init(vma, vm_flags); |
e99668a5 | 1431 | vma->vm_page_prot = vm_get_page_prot(vm_flags); |
e99668a5 LH |
1432 | |
1433 | if (file) { | |
e99668a5 | 1434 | vma->vm_file = get_file(file); |
f8d112a4 LH |
1435 | /* |
1436 | * call_mmap() may map PTE, so ensure there are no existing PTEs | |
63fc66f5 | 1437 | * and call the vm_ops close function if one exists. |
f8d112a4 | 1438 | */ |
63fc66f5 | 1439 | vms_clean_up_area(&vms, &mas_detach); |
e99668a5 LH |
1440 | error = call_mmap(file, vma); |
1441 | if (error) | |
1442 | goto unmap_and_free_vma; | |
1443 | ||
15897894 LS |
1444 | if (vma_is_shared_maywrite(vma)) { |
1445 | error = mapping_map_writable(file->f_mapping); | |
1446 | if (error) | |
1447 | goto close_and_free_vma; | |
1448 | ||
1449 | writable_file_mapping = true; | |
1450 | } | |
1451 | ||
a57b7051 LH |
1452 | /* |
1453 | * Expansion is handled above; merging is handled below. | |
1454 | * Drivers should not alter the address of the VMA. | |
e99668a5 | 1455 | */ |
cc8d1b09 LH |
1456 | error = -EINVAL; |
1457 | if (WARN_ON((addr != vma->vm_start))) | |
a57b7051 | 1458 | goto close_and_free_vma; |
e99668a5 | 1459 | |
53bee98d | 1460 | vma_iter_config(&vmi, addr, end); |
e99668a5 LH |
1461 | /* |
1462 | * If vm_flags changed after call_mmap(), we should try merge | |
1463 | * vma again as we may succeed this time. | |
1464 | */ | |
cacded5e LS |
1465 | if (unlikely(vm_flags != vma->vm_flags && vmg.prev)) { |
1466 | vmg.flags = vma->vm_flags; | |
1467 | /* If this fails, state is reset ready for a reattempt. */ | |
1468 | merge = vma_merge_new_range(&vmg); | |
1469 | ||
e99668a5 LH |
1470 | if (merge) { |
1471 | /* | |
1472 | * ->mmap() can change vma->vm_file and fput | |
1473 | * the original file. So fput the vma->vm_file | |
1474 | * here, or we would add an extra fput for the | |
1475 | * file and ultimately cause a general | |
1476 | * protection fault. | |
1477 | */ | |
1478 | fput(vma->vm_file); | |
1479 | vm_area_free(vma); | |
1480 | vma = merge; | |
1481 | /* Update vm_flags to pick up the change. */ | |
e99668a5 LH |
1482 | vm_flags = vma->vm_flags; |
1483 | goto unmap_writable; | |
1484 | } | |
cacded5e | 1485 | vma_iter_config(&vmi, addr, end); |
e99668a5 LH |
1486 | } |
1487 | ||
1488 | vm_flags = vma->vm_flags; | |
1489 | } else if (vm_flags & VM_SHARED) { | |
1490 | error = shmem_zero_setup(vma); | |
1491 | if (error) | |
1492 | goto free_vma; | |
1493 | } else { | |
1494 | vma_set_anonymous(vma); | |
1495 | } | |
1496 | ||
b507808e JG |
1497 | if (map_deny_write_exec(vma, vma->vm_flags)) { |
1498 | error = -EACCES; | |
6bbf1090 | 1499 | goto close_and_free_vma; |
b507808e JG |
1500 | } |
1501 | ||
e99668a5 | 1502 | /* Allow architectures to sanity-check the vm_flags */ |
cc8d1b09 LH |
1503 | error = -EINVAL; |
1504 | if (!arch_validate_flags(vma->vm_flags)) | |
1505 | goto close_and_free_vma; | |
e99668a5 | 1506 | |
cc8d1b09 | 1507 | error = -ENOMEM; |
b5df0922 | 1508 | if (vma_iter_prealloc(&vmi, vma)) |
cc8d1b09 | 1509 | goto close_and_free_vma; |
e99668a5 | 1510 | |
1c7873e3 HD |
1511 | /* Lock the VMA since it is modified after insertion into VMA tree */ |
1512 | vma_start_write(vma); | |
183654ce | 1513 | vma_iter_store(&vmi, vma); |
e99668a5 | 1514 | mm->map_count++; |
30afc8c3 | 1515 | vma_link_file(vma); |
e99668a5 LH |
1516 | |
1517 | /* | |
cacded5e | 1518 | * vma_merge_new_range() calls khugepaged_enter_vma() too; the below |
e99668a5 LH |
1519 | * call covers the non-merge case. |
1520 | */ | |
1521 | khugepaged_enter_vma(vma, vma->vm_flags); | |
1522 | ||
1523 | /* Once vma denies write, undo our temporary denial count */ | |
1524 | unmap_writable: | |
15897894 | 1525 | if (writable_file_mapping) |
e99668a5 LH |
1526 | mapping_unmap_writable(file->f_mapping); |
1527 | file = vma->vm_file; | |
d7597f59 | 1528 | ksm_add_vma(vma); |
e99668a5 LH |
1529 | expanded: |
1530 | perf_event_mmap(vma); | |
1531 | ||
f8d112a4 LH |
1532 | /* Unmap any existing mapping in the area */ |
1533 | vms_complete_munmap_vmas(&vms, &mas_detach); | |
1534 | ||
5972d97c | 1535 | vm_stat_account(mm, vm_flags, pglen); |
e99668a5 LH |
1536 | if (vm_flags & VM_LOCKED) { |
1537 | if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || | |
1538 | is_vm_hugetlb_page(vma) || | |
1539 | vma == get_gate_vma(current->mm)) | |
e430a95a | 1540 | vm_flags_clear(vma, VM_LOCKED_MASK); |
e99668a5 | 1541 | else |
5972d97c | 1542 | mm->locked_vm += pglen; |
e99668a5 LH |
1543 | } |
1544 | ||
1545 | if (file) | |
1546 | uprobe_mmap(vma); | |
1547 | ||
1548 | /* | |
1549 | * A new (or expanded) vma always gets soft-dirty status. | |
1550 | * Otherwise the user-space soft-dirty page tracker won't | |
1551 | * be able to distinguish the case where a vma area is | |
1552 | * unmapped and then mapped anew in place (which must be | |
1553 | * treated as a completely new data area). | |
1554 | */ | |
1c71222e | 1555 | vm_flags_set(vma, VM_SOFTDIRTY); |
e99668a5 LH |
1556 | |
1557 | vma_set_page_prot(vma); | |
1558 | ||
1559 | validate_mm(mm); | |
1560 | return addr; | |
1561 | ||
deb0f656 | 1562 | close_and_free_vma: |
f8d112a4 | 1563 | if (file && !vms.closed_vm_ops && vma->vm_ops && vma->vm_ops->close) |
deb0f656 | 1564 | vma->vm_ops->close(vma); |
cc8d1b09 LH |
1565 | |
1566 | if (file || vma->vm_file) { | |
e99668a5 | 1567 | unmap_and_free_vma: |
cc8d1b09 LH |
1568 | fput(vma->vm_file); |
1569 | vma->vm_file = NULL; | |
e99668a5 | 1570 | |
fd892593 | 1571 | vma_iter_set(&vmi, vma->vm_end); |
cc8d1b09 | 1572 | /* Undo any partial mapping done by a device driver. */ |
cacded5e | 1573 | unmap_region(&vmi.mas, vma, vmg.prev, vmg.next); |
cc8d1b09 | 1574 | } |
15897894 | 1575 | if (writable_file_mapping) |
e99668a5 LH |
1576 | mapping_unmap_writable(file->f_mapping); |
1577 | free_vma: | |
1578 | vm_area_free(vma); | |
1579 | unacct_error: | |
1580 | if (charged) | |
1581 | vm_unacct_memory(charged); | |
d744f4ac | 1582 | |
f8d112a4 | 1583 | abort_munmap: |
4f87153e | 1584 | vms_abort_munmap_vmas(&vms, &mas_detach); |
d744f4ac | 1585 | gather_failed: |
e99668a5 LH |
1586 | validate_mm(mm); |
1587 | return error; | |
1588 | } | |
1589 | ||
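/*
 * Illustrative sketch (not part of mmap.c): mmap_region() is normally
 * reached through do_mmap(), which validates the request and computes the
 * final vm_flags before mapping. A minimal anonymous private mapping from
 * kernel context might look like this; the helper name is hypothetical and
 * error handling is trimmed.
 */
static unsigned long example_map_anon(struct mm_struct *mm, unsigned long len)
{
	unsigned long populate = 0;
	unsigned long addr;
	LIST_HEAD(uf);

	if (mmap_write_lock_killable(mm))
		return -EINTR;
	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, 0, 0, &populate, &uf);
	mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate && !IS_ERR_VALUE(addr))
		mm_populate(addr, populate);
	return addr;
}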
408579cd | 1590 | static int __vm_munmap(unsigned long start, size_t len, bool unlock) |
1da177e4 LT |
1591 | { |
1592 | int ret; | |
bfce281c | 1593 | struct mm_struct *mm = current->mm; |
897ab3e0 | 1594 | LIST_HEAD(uf); |
183654ce | 1595 | VMA_ITERATOR(vmi, mm, start); |
1da177e4 | 1596 | |
d8ed45c5 | 1597 | if (mmap_write_lock_killable(mm)) |
ae798783 MH |
1598 | return -EINTR; |
1599 | ||
408579cd LH |
1600 | ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); |
1601 | if (ret || !unlock) | |
d8ed45c5 | 1602 | mmap_write_unlock(mm); |
dd2283f2 | 1603 | |
897ab3e0 | 1604 | userfaultfd_unmap_complete(mm, &uf); |
1da177e4 LT |
1605 | return ret; |
1606 | } | |
dd2283f2 YS |
1607 | |
1608 | int vm_munmap(unsigned long start, size_t len) | |
1609 | { | |
1610 | return __vm_munmap(start, len, false); | |
1611 | } | |
a46ef99d LT |
1612 | EXPORT_SYMBOL(vm_munmap); |
1613 | ||
1614 | SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) | |
1615 | { | |
ce18d171 | 1616 | addr = untagged_addr(addr); |
dd2283f2 | 1617 | return __vm_munmap(addr, len, true); |
a46ef99d | 1618 | } |
1da177e4 | 1619 | |
c8d78c18 KS |
1620 | |
1621 | /* | |
1622 | * Emulation of the deprecated remap_file_pages() syscall. | |
1623 | */ | |
1624 | SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |
1625 | unsigned long, prot, unsigned long, pgoff, unsigned long, flags) | |
1626 | { | |
1627 | ||
1628 | struct mm_struct *mm = current->mm; | |
1629 | struct vm_area_struct *vma; | |
1630 | unsigned long populate = 0; | |
1631 | unsigned long ret = -EINVAL; | |
1632 | struct file *file; | |
1633 | ||
ee65728e | 1634 | pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", |
756a025f | 1635 | current->comm, current->pid); |
c8d78c18 KS |
1636 | |
1637 | if (prot) | |
1638 | return ret; | |
1639 | start = start & PAGE_MASK; | |
1640 | size = size & PAGE_MASK; | |
1641 | ||
1642 | if (start + size <= start) | |
1643 | return ret; | |
1644 | ||
1645 | /* Does pgoff wrap? */ | |
1646 | if (pgoff + (size >> PAGE_SHIFT) < pgoff) | |
1647 | return ret; | |
1648 | ||
d8ed45c5 | 1649 | if (mmap_write_lock_killable(mm)) |
dc0ef0df MH |
1650 | return -EINTR; |
1651 | ||
9b593cb2 | 1652 | vma = vma_lookup(mm, start); |
c8d78c18 KS |
1653 | |
1654 | if (!vma || !(vma->vm_flags & VM_SHARED)) | |
1655 | goto out; | |
1656 | ||
48f7df32 | 1657 | if (start + size > vma->vm_end) { |
763ecb03 LH |
1658 | VMA_ITERATOR(vmi, mm, vma->vm_end); |
1659 | struct vm_area_struct *next, *prev = vma; | |
48f7df32 | 1660 | |
763ecb03 | 1661 | for_each_vma_range(vmi, next, start + size) { |
48f7df32 | 1662 | /* hole between vmas? */ |
763ecb03 | 1663 | if (next->vm_start != prev->vm_end) |
48f7df32 KS |
1664 | goto out; |
1665 | ||
1666 | if (next->vm_file != vma->vm_file) | |
1667 | goto out; | |
1668 | ||
1669 | if (next->vm_flags != vma->vm_flags) | |
1670 | goto out; | |
1671 | ||
1db43d3f LH |
1672 | if (start + size <= next->vm_end) |
1673 | break; | |
1674 | ||
763ecb03 | 1675 | prev = next; |
48f7df32 KS |
1676 | } |
1677 | ||
1678 | if (!next) | |
1679 | goto out; | |
c8d78c18 KS |
1680 | } |
1681 | ||
1682 | prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; | |
1683 | prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; | |
1684 | prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; | |
1685 | ||
1686 | flags &= MAP_NONBLOCK; | |
1687 | flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; | |
fce000b1 | 1688 | if (vma->vm_flags & VM_LOCKED) |
c8d78c18 | 1689 | flags |= MAP_LOCKED; |
48f7df32 | 1690 | |
c8d78c18 | 1691 | file = get_file(vma->vm_file); |
ea7e2d5e SH |
1692 | ret = security_mmap_file(vma->vm_file, prot, flags); |
1693 | if (ret) | |
1694 | goto out_fput; | |
45e55300 | 1695 | ret = do_mmap(vma->vm_file, start, size, |
592b5fad | 1696 | prot, flags, 0, pgoff, &populate, NULL); |
ea7e2d5e | 1697 | out_fput: |
c8d78c18 KS |
1698 | fput(file); |
1699 | out: | |
d8ed45c5 | 1700 | mmap_write_unlock(mm); |
c8d78c18 KS |
1701 | if (populate) |
1702 | mm_populate(ret, populate); | |
1703 | if (!IS_ERR_VALUE(ret)) | |
1704 | ret = 0; | |
1705 | return ret; | |
1706 | } | |
1707 | ||
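/*
 * Illustrative user-space sketch (not kernel code): exercising the
 * deprecated syscall emulated above. prot must be 0 because the VMA's
 * existing protection is reused, and the region must be MAP_SHARED;
 * under the emulation the call behaves like an
 * mmap(MAP_SHARED | MAP_FIXED | MAP_POPULATE) of the file range starting
 * at page offset 'pgoff'. Error handling is elided.
 */
#define _GNU_SOURCE
#include <sys/mman.h>

static void example_remap(void *addr, size_t size, size_t pgoff)
{
	remap_file_pages(addr, size, /* prot = */ 0, pgoff, /* flags = */ 0);
}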
2e7ce7d3 LH |
1708 | /* |
1709 | * do_brk_flags() - Increase the brk vma if the flags match. | |
92fed820 | 1710 | * @vmi: The vma iterator |
2e7ce7d3 LH |
1711 | * @addr: The start address |
1712 | * @len: The length of the increase | |
1713 | * @vma: The previous vma, to be expanded in place when the flags match | |
1714 | * @flags: The VMA Flags | |
1715 | * | |
1716 | * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags | |
1717 | * do not match then create a new anonymous VMA. Eventually we may be able to | |
1718 | * do some brk-specific accounting here. | |
1719 | */ | |
92fed820 | 1720 | static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, |
763ecb03 | 1721 | unsigned long addr, unsigned long len, unsigned long flags) |
2e7ce7d3 LH |
1722 | { |
1723 | struct mm_struct *mm = current->mm; | |
1da177e4 | 1724 | |
2e7ce7d3 LH |
1725 | /* |
1726 | * Check against address space limits by the changed size | |
1727 | * Note: This happens *after* clearing old mappings in some code paths. | |
1728 | */ | |
1729 | flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; | |
84638335 | 1730 | if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) |
1da177e4 LT |
1731 | return -ENOMEM; |
1732 | ||
1733 | if (mm->map_count > sysctl_max_map_count) | |
1734 | return -ENOMEM; | |
1735 | ||
191c5424 | 1736 | if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) |
1da177e4 LT |
1737 | return -ENOMEM; |
1738 | ||
1da177e4 | 1739 | /* |
2e7ce7d3 LH |
1740 | * Expand the existing vma if possible; note that singular lists do not | |
1741 | * occur after forking, so the expand will only happen on new VMAs. | |
1da177e4 | 1742 | */ |
3e01310d | 1743 | if (vma && vma->vm_end == addr) { |
2f1c6611 | 1744 | VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr)); |
28c5609f | 1745 | |
2f1c6611 | 1746 | vmg.prev = vma; |
cacded5e | 1747 | vma_iter_next_range(vmi); |
2e7ce7d3 | 1748 | |
cacded5e | 1749 | if (vma_merge_new_range(&vmg)) |
2f1c6611 | 1750 | goto out; |
cacded5e LS |
1751 | else if (vmg_nomem(&vmg)) |
1752 | goto unacct_fail; | |
1da177e4 | 1753 | } |
2e7ce7d3 | 1754 | |
b5df0922 LH |
1755 | if (vma) |
1756 | vma_iter_next_range(vmi); | |
2e7ce7d3 LH |
1757 | /* create a vma struct for an anonymous mapping */ |
1758 | vma = vm_area_alloc(mm); | |
1759 | if (!vma) | |
675eaca1 | 1760 | goto unacct_fail; |
1da177e4 | 1761 | |
bfd40eaf | 1762 | vma_set_anonymous(vma); |
412c6ef9 | 1763 | vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); |
1c71222e | 1764 | vm_flags_init(vma, flags); |
3ed75eb8 | 1765 | vma->vm_page_prot = vm_get_page_prot(flags); |
ad9f0063 | 1766 | vma_start_write(vma); |
92fed820 | 1767 | if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) |
2e7ce7d3 | 1768 | goto mas_store_fail; |
d4af56c5 | 1769 | |
2e7ce7d3 | 1770 | mm->map_count++; |
2574d5e4 | 1771 | validate_mm(mm); |
d7597f59 | 1772 | ksm_add_vma(vma); |
1da177e4 | 1773 | out: |
3af9e859 | 1774 | perf_event_mmap(vma); |
1da177e4 | 1775 | mm->total_vm += len >> PAGE_SHIFT; |
84638335 | 1776 | mm->data_vm += len >> PAGE_SHIFT; |
128557ff ML |
1777 | if (flags & VM_LOCKED) |
1778 | mm->locked_vm += (len >> PAGE_SHIFT); | |
1c71222e | 1779 | vm_flags_set(vma, VM_SOFTDIRTY); |
5d22fc25 | 1780 | return 0; |
d4af56c5 | 1781 | |
2e7ce7d3 | 1782 | mas_store_fail: |
d4af56c5 | 1783 | vm_area_free(vma); |
675eaca1 | 1784 | unacct_fail: |
2e7ce7d3 LH |
1785 | vm_unacct_memory(len >> PAGE_SHIFT); |
1786 | return -ENOMEM; | |
1da177e4 LT |
1787 | } |
1788 | ||
bb177a73 | 1789 | int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) |
e4eb1ff6 LT |
1790 | { |
1791 | struct mm_struct *mm = current->mm; | |
2e7ce7d3 | 1792 | struct vm_area_struct *vma = NULL; |
bb177a73 | 1793 | unsigned long len; |
5d22fc25 | 1794 | int ret; |
128557ff | 1795 | bool populate; |
897ab3e0 | 1796 | LIST_HEAD(uf); |
92fed820 | 1797 | VMA_ITERATOR(vmi, mm, addr); |
e4eb1ff6 | 1798 | |
bb177a73 MH |
1799 | len = PAGE_ALIGN(request); |
1800 | if (len < request) | |
1801 | return -ENOMEM; | |
1802 | if (!len) | |
1803 | return 0; | |
1804 | ||
2e7ce7d3 LH |
1805 | /* Until we need other flags, refuse anything except VM_EXEC. */ |
1806 | if ((flags & (~VM_EXEC)) != 0) | |
1807 | return -EINVAL; | |
1808 | ||
e0f81ab1 SO |
1809 | if (mmap_write_lock_killable(mm)) |
1810 | return -EINTR; | |
1811 | ||
2e7ce7d3 LH |
1812 | ret = check_brk_limits(addr, len); |
1813 | if (ret) | |
1814 | goto limits_failed; | |
1815 | ||
183654ce | 1816 | ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); |
2e7ce7d3 LH |
1817 | if (ret) |
1818 | goto munmap_failed; | |
1819 | ||
92fed820 LH |
1820 | vma = vma_prev(&vmi); |
1821 | ret = do_brk_flags(&vmi, vma, addr, len, flags); | |
128557ff | 1822 | populate = ((mm->def_flags & VM_LOCKED) != 0); |
d8ed45c5 | 1823 | mmap_write_unlock(mm); |
897ab3e0 | 1824 | userfaultfd_unmap_complete(mm, &uf); |
5d22fc25 | 1825 | if (populate && !ret) |
128557ff | 1826 | mm_populate(addr, len); |
e4eb1ff6 | 1827 | return ret; |
2e7ce7d3 LH |
1828 | |
1829 | munmap_failed: | |
1830 | limits_failed: | |
1831 | mmap_write_unlock(mm); | |
1832 | return ret; | |
e4eb1ff6 | 1833 | } |
16e72e9b DV |
1834 | EXPORT_SYMBOL(vm_brk_flags); |
1835 | ||
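/*
 * Illustrative sketch (not part of mmap.c): binary loaders use
 * vm_brk_flags() to create anonymous, brk-style mappings such as a
 * zero-filled BSS area. The helper and its addresses are hypothetical;
 * only VM_EXEC may be passed in flags.
 */
static int example_map_bss(unsigned long bss_start, unsigned long bss_end)
{
	unsigned long start = bss_start & PAGE_MASK;

	if (bss_end <= start)
		return 0;
	return vm_brk_flags(start, bss_end - start, /* flags = */ 0);
}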
1da177e4 LT |
1836 | /* Release all mmaps. */ |
1837 | void exit_mmap(struct mm_struct *mm) | |
1838 | { | |
d16dfc55 | 1839 | struct mmu_gather tlb; |
ba470de4 | 1840 | struct vm_area_struct *vma; |
1da177e4 | 1841 | unsigned long nr_accounted = 0; |
d4e6b397 | 1842 | VMA_ITERATOR(vmi, mm, 0); |
763ecb03 | 1843 | int count = 0; |
1da177e4 | 1844 | |
d6dd61c8 | 1845 | /* mm's last user has gone, and it's about to be pulled down */ |
cddb8a5c | 1846 | mmu_notifier_release(mm); |
d6dd61c8 | 1847 | |
bf3980c8 | 1848 | mmap_read_lock(mm); |
9480c53e JF |
1849 | arch_exit_mmap(mm); |
1850 | ||
d4e6b397 | 1851 | vma = vma_next(&vmi); |
d2406291 | 1852 | if (!vma || unlikely(xa_is_zero(vma))) { |
64591e86 | 1853 | /* Can happen if dup_mmap() received an OOM */ |
bf3980c8 | 1854 | mmap_read_unlock(mm); |
d2406291 PZ |
1855 | mmap_write_lock(mm); |
1856 | goto destroy; | |
64591e86 | 1857 | } |
9480c53e | 1858 | |
1da177e4 | 1859 | lru_add_drain(); |
1da177e4 | 1860 | flush_cache_mm(mm); |
d8b45053 | 1861 | tlb_gather_mmu_fullmm(&tlb, mm); |
901608d9 | 1862 | /* update_hiwater_rss(mm) here? but nobody should be looking */ |
763ecb03 | 1863 | /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ |
d4e6b397 | 1864 | unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false); |
bf3980c8 SB |
1865 | mmap_read_unlock(mm); |
1866 | ||
1867 | /* | |
1868 | * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper | |
b3541d91 | 1869 | * because the memory has already been freed. |
bf3980c8 SB |
1870 | */ |
1871 | set_bit(MMF_OOM_SKIP, &mm->flags); | |
1872 | mmap_write_lock(mm); | |
3dd44325 | 1873 | mt_clear_in_rcu(&mm->mm_mt); |
d4e6b397 YD |
1874 | vma_iter_set(&vmi, vma->vm_end); |
1875 | free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS, | |
98e51a22 | 1876 | USER_PGTABLES_CEILING, true); |
ae8eba8b | 1877 | tlb_finish_mmu(&tlb); |
1da177e4 | 1878 | |
763ecb03 LH |
1879 | /* |
1880 | * Walk the list again, actually closing and freeing it, with preemption | |
1881 | * enabled, without holding any MM locks besides the unreachable | |
1882 | * mmap_write_lock. | |
1883 | */ | |
d4e6b397 | 1884 | vma_iter_set(&vmi, vma->vm_end); |
763ecb03 | 1885 | do { |
4f74d2c8 LT |
1886 | if (vma->vm_flags & VM_ACCOUNT) |
1887 | nr_accounted += vma_pages(vma); | |
f8d112a4 | 1888 | remove_vma(vma, /* unreachable = */ true, /* closed = */ false); |
763ecb03 | 1889 | count++; |
0a3b3c25 | 1890 | cond_resched(); |
d4e6b397 | 1891 | vma = vma_next(&vmi); |
d2406291 | 1892 | } while (vma && likely(!xa_is_zero(vma))); |
763ecb03 LH |
1893 | |
1894 | BUG_ON(count != mm->map_count); | |
d4af56c5 LH |
1895 | |
1896 | trace_exit_mmap(mm); | |
d2406291 | 1897 | destroy: |
d4af56c5 | 1898 | __mt_destroy(&mm->mm_mt); |
64591e86 | 1899 | mmap_write_unlock(mm); |
4f74d2c8 | 1900 | vm_unacct_memory(nr_accounted); |
1da177e4 LT |
1901 | } |
1902 | ||
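/*
 * Context note (illustrative): exit_mmap() runs once the last mm_users
 * reference is dropped, i.e. from mmput() -> __mmput() -> exit_mmap(),
 * at which point no other task can fault on this mm.
 */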
1903 | /* Insert vm structure into process list sorted by address | |
1904 | * and into the inode's i_mmap tree. If vm_file is non-NULL | |
c8c06efa | 1905 | * then i_mmap_rwsem is taken here. |
1da177e4 | 1906 | */ |
6597d783 | 1907 | int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) |
1da177e4 | 1908 | { |
d4af56c5 | 1909 | unsigned long charged = vma_pages(vma); |
1da177e4 | 1910 | |
d4af56c5 | 1911 | |
d0601a50 | 1912 | if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) |
c9d13f5f | 1913 | return -ENOMEM; |
d4af56c5 | 1914 | |
c9d13f5f | 1915 | if ((vma->vm_flags & VM_ACCOUNT) && |
d4af56c5 | 1916 | security_vm_enough_memory_mm(mm, charged)) |
c9d13f5f CG |
1917 | return -ENOMEM; |
1918 | ||
1da177e4 LT |
1919 | /* |
1920 | * The vm_pgoff of a purely anonymous vma should be irrelevant | |
1921 | * until its first write fault, when page's anon_vma and index | |
1922 | * are set. But now set the vm_pgoff it will almost certainly | |
1923 | * end up with (unless mremap moves it elsewhere before that | |
1924 | * first write fault), so /proc/pid/maps tells a consistent story. | |
1925 | * | |
1926 | * By setting it to reflect the virtual start address of the | |
1927 | * vma, merges and splits can happen in a seamless way, just | |
1928 | * using the existing file pgoff checks and manipulations. | |
8332326e | 1929 | * Similarly in do_mmap and in do_brk_flags. |
1da177e4 | 1930 | */ |
8a9cc3b5 | 1931 | if (vma_is_anonymous(vma)) { |
1da177e4 LT |
1932 | BUG_ON(vma->anon_vma); |
1933 | vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; | |
1934 | } | |
2b144498 | 1935 | |
763ecb03 | 1936 | if (vma_link(mm, vma)) { |
dd34d9fe AY |
1937 | if (vma->vm_flags & VM_ACCOUNT) |
1938 | vm_unacct_memory(charged); | |
d4af56c5 LH |
1939 | return -ENOMEM; |
1940 | } | |
1941 | ||
1da177e4 LT |
1942 | return 0; |
1943 | } | |
1944 | ||
119f657c | 1945 | /* |
1946 | * Return true if the calling process may expand its vm space by the passed | |
1947 | * number of pages | |
1948 | */ | |
84638335 | 1949 | bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) |
119f657c | 1950 | { |
84638335 KK |
1951 | if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) |
1952 | return false; | |
119f657c | 1953 | |
d977d56c KK |
1954 | if (is_data_mapping(flags) && |
1955 | mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { | |
f4fcd558 KK |
1956 | /* Workaround for Valgrind */ |
1957 | if (rlimit(RLIMIT_DATA) == 0 && | |
1958 | mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) | |
1959 | return true; | |
57a7702b DW |
1960 | |
1961 | pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n", | |
1962 | current->comm, current->pid, | |
1963 | (mm->data_vm + npages) << PAGE_SHIFT, | |
1964 | rlimit(RLIMIT_DATA), | |
1965 | ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); | |
1966 | ||
1967 | if (!ignore_rlimit_data) | |
d977d56c KK |
1968 | return false; |
1969 | } | |
119f657c | 1970 | |
84638335 KK |
1971 | return true; |
1972 | } | |
1973 | ||
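/*
 * Illustrative sketch (not part of mmap.c): the unit conversion the checks
 * above depend on. rlimit() reports bytes, while total_vm and data_vm count
 * pages, hence the >> PAGE_SHIFT before comparing. The helper name is
 * hypothetical.
 */
static bool example_fits_rlimit_as(struct mm_struct *mm, unsigned long npages)
{
	unsigned long limit_pages = rlimit(RLIMIT_AS) >> PAGE_SHIFT;

	return mm->total_vm + npages <= limit_pages;
}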
1974 | void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) | |
1975 | { | |
7866076b | 1976 | WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); |
84638335 | 1977 | |
d977d56c | 1978 | if (is_exec_mapping(flags)) |
84638335 | 1979 | mm->exec_vm += npages; |
d977d56c | 1980 | else if (is_stack_mapping(flags)) |
84638335 | 1981 | mm->stack_vm += npages; |
d977d56c | 1982 | else if (is_data_mapping(flags)) |
84638335 | 1983 | mm->data_vm += npages; |
119f657c | 1984 | } |
fa5dc22f | 1985 | |
b3ec9f33 | 1986 | static vm_fault_t special_mapping_fault(struct vm_fault *vmf); |
a62c34bd AL |
1987 | |
1988 | /* | |
223febc6 ME |
1989 | * Close hook, called for unmap() and on the old vma for mremap(). |
1990 | * | |
a62c34bd AL |
1991 | * Having a close hook prevents vma merging regardless of flags. |
1992 | */ | |
1993 | static void special_mapping_close(struct vm_area_struct *vma) | |
1994 | { | |
223febc6 ME |
1995 | const struct vm_special_mapping *sm = vma->vm_private_data; |
1996 | ||
1997 | if (sm->close) | |
1998 | sm->close(sm, vma); | |
a62c34bd AL |
1999 | } |
2000 | ||
2001 | static const char *special_mapping_name(struct vm_area_struct *vma) | |
2002 | { | |
2003 | return ((struct vm_special_mapping *)vma->vm_private_data)->name; | |
2004 | } | |
2005 | ||
14d07113 | 2006 | static int special_mapping_mremap(struct vm_area_struct *new_vma) |
b059a453 DS |
2007 | { |
2008 | struct vm_special_mapping *sm = new_vma->vm_private_data; | |
2009 | ||
280e87e9 DS |
2010 | if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) |
2011 | return -EFAULT; | |
2012 | ||
b059a453 DS |
2013 | if (sm->mremap) |
2014 | return sm->mremap(sm, new_vma); | |
280e87e9 | 2015 | |
b059a453 DS |
2016 | return 0; |
2017 | } | |
2018 | ||
871402e0 DS |
2019 | static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) |
2020 | { | |
2021 | /* | |
2022 | * Forbid splitting special mappings - kernel has expectations over | |
2023 | * the number of pages in mapping. Together with VM_DONTEXPAND | |
2024 | * the size of vma should stay the same over the special mapping's | |
2025 | * lifetime. | |
2026 | */ | |
2027 | return -EINVAL; | |
2028 | } | |
2029 | ||
a62c34bd AL |
2030 | static const struct vm_operations_struct special_mapping_vmops = { |
2031 | .close = special_mapping_close, | |
2032 | .fault = special_mapping_fault, | |
b059a453 | 2033 | .mremap = special_mapping_mremap, |
a62c34bd | 2034 | .name = special_mapping_name, |
af34ebeb DS |
2035 | /* vDSO code relies that VVAR can't be accessed remotely */ |
2036 | .access = NULL, | |
871402e0 | 2037 | .may_split = special_mapping_split, |
a62c34bd AL |
2038 | }; |
2039 | ||
b3ec9f33 | 2040 | static vm_fault_t special_mapping_fault(struct vm_fault *vmf) |
fa5dc22f | 2041 | { |
11bac800 | 2042 | struct vm_area_struct *vma = vmf->vma; |
b1d0e4f5 | 2043 | pgoff_t pgoff; |
fa5dc22f | 2044 | struct page **pages; |
497258df | 2045 | struct vm_special_mapping *sm = vma->vm_private_data; |
fa5dc22f | 2046 | |
497258df LT |
2047 | if (sm->fault) |
2048 | return sm->fault(sm, vmf->vma, vmf); | |
f872f540 | 2049 | |
497258df | 2050 | pages = sm->pages; |
a62c34bd | 2051 | |
8a9cc3b5 | 2052 | for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) |
b1d0e4f5 | 2053 | pgoff--; |
fa5dc22f RM |
2054 | |
2055 | if (*pages) { | |
2056 | struct page *page = *pages; | |
2057 | get_page(page); | |
b1d0e4f5 NP |
2058 | vmf->page = page; |
2059 | return 0; | |
fa5dc22f RM |
2060 | } |
2061 | ||
b1d0e4f5 | 2062 | return VM_FAULT_SIGBUS; |
fa5dc22f RM |
2063 | } |
2064 | ||
a62c34bd AL |
2065 | static struct vm_area_struct *__install_special_mapping( |
2066 | struct mm_struct *mm, | |
2067 | unsigned long addr, unsigned long len, | |
27f28b97 CG |
2068 | unsigned long vm_flags, void *priv, |
2069 | const struct vm_operations_struct *ops) | |
fa5dc22f | 2070 | { |
462e635e | 2071 | int ret; |
fa5dc22f RM |
2072 | struct vm_area_struct *vma; |
2073 | ||
490fc053 | 2074 | vma = vm_area_alloc(mm); |
fa5dc22f | 2075 | if (unlikely(vma == NULL)) |
3935ed6a | 2076 | return ERR_PTR(-ENOMEM); |
fa5dc22f | 2077 | |
412c6ef9 | 2078 | vma_set_range(vma, addr, addr + len, 0); |
e430a95a SB |
2079 | vm_flags_init(vma, (vm_flags | mm->def_flags | |
2080 | VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); | |
3ed75eb8 | 2081 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); |
fa5dc22f | 2082 | |
a62c34bd AL |
2083 | vma->vm_ops = ops; |
2084 | vma->vm_private_data = priv; | |
fa5dc22f | 2085 | |
462e635e TO |
2086 | ret = insert_vm_struct(mm, vma); |
2087 | if (ret) | |
2088 | goto out; | |
fa5dc22f | 2089 | |
84638335 | 2090 | vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); |
fa5dc22f | 2091 | |
cdd6c482 | 2092 | perf_event_mmap(vma); |
089dd79d | 2093 | |
3935ed6a | 2094 | return vma; |
462e635e TO |
2095 | |
2096 | out: | |
3928d4f5 | 2097 | vm_area_free(vma); |
3935ed6a SS |
2098 | return ERR_PTR(ret); |
2099 | } | |
2100 | ||
2eefd878 DS |
2101 | bool vma_is_special_mapping(const struct vm_area_struct *vma, |
2102 | const struct vm_special_mapping *sm) | |
2103 | { | |
2104 | return vma->vm_private_data == sm && | |
497258df | 2105 | vma->vm_ops == &special_mapping_vmops; |
2eefd878 DS |
2106 | } |
2107 | ||
a62c34bd | 2108 | /* |
c1e8d7c6 | 2109 | * Called with mm->mmap_lock held for writing. |
a62c34bd AL |
2110 | * Insert a new vma covering the given region, with the given flags. |
2111 | * Its pages are supplied by the given array of struct page *. | |
2112 | * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. | |
2113 | * The region past the last page supplied will always produce SIGBUS. | |
2114 | * The array pointer and the pages it points to are assumed to stay alive | |
2115 | * for as long as this mapping might exist. | |
2116 | */ | |
2117 | struct vm_area_struct *_install_special_mapping( | |
2118 | struct mm_struct *mm, | |
2119 | unsigned long addr, unsigned long len, | |
2120 | unsigned long vm_flags, const struct vm_special_mapping *spec) | |
2121 | { | |
27f28b97 CG |
2122 | return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, |
2123 | &special_mapping_vmops); | |
a62c34bd AL |
2124 | } |
2125 | ||
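/*
 * Illustrative sketch (not part of mmap.c): how an architecture might
 * install a one-page, vDSO-style special mapping. All names are
 * hypothetical; a real caller allocates the backing page, picks addr via
 * get_unmapped_area(), and holds the mmap write lock.
 */
static struct page *example_pages[2];	/* one page + NULL terminator */

static const struct vm_special_mapping example_mapping = {
	.name	= "[example]",
	.pages	= example_pages,	/* example_pages[0] set elsewhere */
};

static int example_install(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD,
				       &example_mapping);
	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}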
8feae131 | 2126 | /* |
3edf41d8 | 2127 | * initialise the percpu counter for VM |
8feae131 DH |
2128 | */ |
2129 | void __init mmap_init(void) | |
2130 | { | |
00a62ce9 KM |
2131 | int ret; |
2132 | ||
908c7f19 | 2133 | ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); |
00a62ce9 | 2134 | VM_BUG_ON(ret); |
8feae131 | 2135 | } |
c9b1d098 AS |
2136 | |
2137 | /* | |
2138 | * Initialise sysctl_user_reserve_kbytes. | |
2139 | * | |
2140 | * This is intended to prevent a user from starting a single memory-hogging | |
2141 | * process that leaves them unable to recover (kill the hog) in | |
2142 | * OVERCOMMIT_NEVER mode. | |
2143 | * | |
2144 | * The default value is min(3% of free memory, 128MB) | |
2145 | * 128MB is enough to recover with sshd/login, bash, and top/kill. | |
2146 | */ | |
1640879a | 2147 | static int init_user_reserve(void) |
c9b1d098 AS |
2148 | { |
2149 | unsigned long free_kbytes; | |
2150 | ||
b1773e0e | 2151 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
c9b1d098 | 2152 | |
9c793854 | 2153 | sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K); |
c9b1d098 AS |
2154 | return 0; |
2155 | } | |
a64fb3cd | 2156 | subsys_initcall(init_user_reserve); |
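/*
 * Worked example (illustrative): with 8 GiB free, free_kbytes is 8388608,
 * and free_kbytes / 32 (~3%) is 262144 kB = 256 MiB, so the SZ_128K cap
 * (131072 kB = 128 MiB) applies. With 2 GiB free, the computed 64 MiB is
 * below the cap and is used as-is.
 */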
4eeab4f5 AS |
2157 | |
2158 | /* | |
2159 | * Initialise sysctl_admin_reserve_kbytes. | |
2160 | * | |
2161 | * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin | |
2162 | * to log in and kill a memory hogging process. | |
2163 | * | |
2164 | * Systems with more than 256MB will reserve 8MB, enough to recover | |
2165 | * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will | |
2166 | * only reserve 3% of free pages by default. | |
2167 | */ | |
1640879a | 2168 | static int init_admin_reserve(void) |
4eeab4f5 AS |
2169 | { |
2170 | unsigned long free_kbytes; | |
2171 | ||
b1773e0e | 2172 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
4eeab4f5 | 2173 | |
9c793854 | 2174 | sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K); |
4eeab4f5 AS |
2175 | return 0; |
2176 | } | |
a64fb3cd | 2177 | subsys_initcall(init_admin_reserve); |
1640879a AS |
2178 | |
2179 | /* | |
2180 | * Reinititalise user and admin reserves if memory is added or removed. | |
2181 | * | |
2182 | * The default user reserve max is 128MB, and the default max for the | |
2183 | * admin reserve is 8MB. These are usually, but not always, enough to | |
2184 | * enable recovery from a memory hogging process using login/sshd, a shell, | |
2185 | * and tools like top. It may make sense to increase or even disable the | |
2186 | * reserve depending on the existence of swap or variations in the recovery | |
2187 | * tools. So, the admin may have changed them. | |
2188 | * | |
2189 | * If memory is added and the reserves have been eliminated or increased above | |
2190 | * the default max, then we'll trust the admin. | |
2191 | * | |
2192 | * If memory is removed and there isn't enough free memory, then we | |
2193 | * need to reset the reserves. | |
2194 | * | |
2195 | * Otherwise keep the reserve set by the admin. | |
2196 | */ | |
2197 | static int reserve_mem_notifier(struct notifier_block *nb, | |
2198 | unsigned long action, void *data) | |
2199 | { | |
2200 | unsigned long tmp, free_kbytes; | |
2201 | ||
2202 | switch (action) { | |
2203 | case MEM_ONLINE: | |
2204 | /* Default max is 128MB. Leave alone if modified by operator. */ | |
2205 | tmp = sysctl_user_reserve_kbytes; | |
9c793854 | 2206 | if (tmp > 0 && tmp < SZ_128K) |
1640879a AS |
2207 | init_user_reserve(); |
2208 | ||
2209 | /* Default max is 8MB. Leave alone if modified by operator. */ | |
2210 | tmp = sysctl_admin_reserve_kbytes; | |
9c793854 | 2211 | if (tmp > 0 && tmp < SZ_8K) |
1640879a AS |
2212 | init_admin_reserve(); |
2213 | ||
2214 | break; | |
2215 | case MEM_OFFLINE: | |
b1773e0e | 2216 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
1640879a AS |
2217 | |
2218 | if (sysctl_user_reserve_kbytes > free_kbytes) { | |
2219 | init_user_reserve(); | |
2220 | pr_info("vm.user_reserve_kbytes reset to %lu\n", | |
2221 | sysctl_user_reserve_kbytes); | |
2222 | } | |
2223 | ||
2224 | if (sysctl_admin_reserve_kbytes > free_kbytes) { | |
2225 | init_admin_reserve(); | |
2226 | pr_info("vm.admin_reserve_kbytes reset to %lu\n", | |
2227 | sysctl_admin_reserve_kbytes); | |
2228 | } | |
2229 | break; | |
2230 | default: | |
2231 | break; | |
2232 | } | |
2233 | return NOTIFY_OK; | |
2234 | } | |
2235 | ||
1640879a AS |
2236 | static int __meminit init_reserve_notifier(void) |
2237 | { | |
1eeaa4fd | 2238 | if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI)) |
b1de0d13 | 2239 | pr_err("Failed registering memory add/remove notifier for admin reserve\n"); |
1640879a AS |
2240 | |
2241 | return 0; | |
2242 | } | |
a64fb3cd | 2243 | subsys_initcall(init_reserve_notifier); |
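/*
 * Illustrative sketch (not part of mmap.c): registering a hypothetical
 * hotplug callback with the same helper used above. The callback receives
 * MEM_ONLINE/MEM_OFFLINE actions and must return a notifier verdict.
 */
static int example_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	if (action == MEM_ONLINE)
		pr_info("example: memory onlined\n");
	return NOTIFY_OK;
}

static int __meminit example_register_notifier(void)
{
	return hotplug_memory_notifier(example_mem_notifier,
				       DEFAULT_CALLBACK_PRI);
}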
d61f0d59 LS |
2244 | |
2245 | /* | |
2246 | * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between | |
2247 | * this VMA and its relocated range, which will now reside at [vma->vm_start - | |
2248 | * shift, vma->vm_end - shift). | |
2249 | * | |
2250 | * This function is almost certainly NOT what you want for anything other than | |
2251 | * early executable temporary stack relocation. | |
2252 | */ | |
2253 | int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) | |
2254 | { | |
2255 | /* | |
2256 | * The process proceeds as follows: | |
2257 | * | |
2258 | * 1) Use shift to calculate the new vma endpoints. | |
2259 | * 2) Extend vma to cover both the old and new ranges. This ensures the | |
2260 | * arguments passed to subsequent functions are consistent. | |
2261 | * 3) Move vma's page tables to the new range. | |
2262 | * 4) Free up any cleared pgd range. | |
2263 | * 5) Shrink the vma to cover only the new range. | |
2264 | */ | |
2265 | ||
2266 | struct mm_struct *mm = vma->vm_mm; | |
2267 | unsigned long old_start = vma->vm_start; | |
2268 | unsigned long old_end = vma->vm_end; | |
2269 | unsigned long length = old_end - old_start; | |
2270 | unsigned long new_start = old_start - shift; | |
2271 | unsigned long new_end = old_end - shift; | |
2272 | VMA_ITERATOR(vmi, mm, new_start); | |
fc21959f | 2273 | VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff); |
d61f0d59 LS |
2274 | struct vm_area_struct *next; |
2275 | struct mmu_gather tlb; | |
2276 | ||
2277 | BUG_ON(new_start > new_end); | |
2278 | ||
2279 | /* | |
2280 | * ensure there are no vmas between where we want to go | |
2281 | * and where we are | |
2282 | */ | |
2283 | if (vma != vma_next(&vmi)) | |
2284 | return -EFAULT; | |
2285 | ||
2286 | vma_iter_prev_range(&vmi); | |
2287 | /* | |
2288 | * cover the whole range: [new_start, old_end) | |
2289 | */ | |
fc21959f LS |
2290 | vmg.vma = vma; |
2291 | if (vma_expand(&vmg)) | |
d61f0d59 LS |
2292 | return -ENOMEM; |
2293 | ||
2294 | /* | |
2295 | * move the page tables downwards; on failure we rely on | |
2296 | * process cleanup to remove whatever mess we made. | |
2297 | */ | |
2298 | if (length != move_page_tables(vma, old_start, | |
2299 | vma, new_start, length, false, true)) | |
2300 | return -ENOMEM; | |
2301 | ||
2302 | lru_add_drain(); | |
2303 | tlb_gather_mmu(&tlb, mm); | |
2304 | next = vma_next(&vmi); | |
2305 | if (new_end > old_start) { | |
2306 | /* | |
2307 | * when the old and new regions overlap clear from new_end. | |
2308 | */ | |
2309 | free_pgd_range(&tlb, new_end, old_end, new_end, | |
2310 | next ? next->vm_start : USER_PGTABLES_CEILING); | |
2311 | } else { | |
2312 | /* | |
2313 | * otherwise, clean from old_start; this is done to not touch | |
2314 | * the address space in [new_end, old_start), as some architectures | |
2315 | * have constraints on va-space that make this illegal (IA64) - | |
2316 | * for the others it's just a little faster. | |
2317 | */ | |
2318 | free_pgd_range(&tlb, old_start, old_end, new_end, | |
2319 | next ? next->vm_start : USER_PGTABLES_CEILING); | |
2320 | } | |
2321 | tlb_finish_mmu(&tlb); | |
2322 | ||
2323 | vma_prev(&vmi); | |
2324 | /* Shrink the vma to just the new range */ | |
2325 | return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff); | |
2326 | } |