// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section, otherwise it falls back
 * to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

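/*
 * Illustrative sketch, not part of mm/util.c: pairing kstrdup_const()
 * with kfree_const() lets a caller keep a pointer into .rodata when the
 * source is a string literal and only pay for an allocation otherwise.
 * The struct and function names below are hypothetical.
 */
struct example_dev {
	const char *name;
};

static int example_dev_set_name(struct example_dev *dev, const char *name)
{
	const char *copy = kstrdup_const(name, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;
	kfree_const(dev->name);	/* safe for both .rodata and heap copies */
	dev->name = copy;
	return 0;
}
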
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

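/*
 * Illustrative sketch, not part of mm/util.c: kmemdup_nul() is the
 * right tool when the length of a non-terminated byte range is already
 * known, e.g. a token inside a larger buffer; kstrndup() would have to
 * re-scan the bytes with strnlen() first. Names below are hypothetical.
 */
static char *example_dup_token(const char *buf, size_t start, size_t end)
{
	/* copies exactly end - start bytes and appends the '\0' */
	return kmemdup_nul(buf + start, end - start, GFP_KERNEL);
}
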
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

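/*
 * Illustrative sketch, not part of mm/util.c: a typical ioctl-style
 * copy-in of a small user buffer via memdup_user(). The handler and
 * argument names are hypothetical.
 */
static int example_ioctl_set_config(void __user *argp, size_t len)
{
	void *cfg = memdup_user(argp, len);

	if (IS_ERR(cfg))
		return PTR_ERR(cfg);	/* -ENOMEM or -EFAULT */

	/* ... validate and apply the configuration ... */

	kfree(cfg);
	return 0;
}
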
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

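/*
 * Illustrative sketch, not part of mm/util.c: strndup_user() bounds the
 * copy and guarantees NUL-termination, so it is the usual way to pull a
 * user-supplied name into the kernel. Names below are hypothetical.
 */
static int example_set_label(const char __user *ubuf)
{
	char *label = strndup_user(ubuf, PAGE_SIZE);

	if (IS_ERR(label))
		return PTR_ERR(label);	/* -EFAULT or -EINVAL */

	/* ... use the NUL-terminated label ... */

	kfree(label);
	return 0;
}
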
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

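/*
 * Illustrative sketch, not part of mm/util.c: the vm_next/vm_prev links
 * maintained by the two helpers above support the classic VMA walk,
 * anchored at mm->mmap (assuming the caller holds mmap_lock):
 */
static unsigned long example_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		nr++;
	return nr;
}
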
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

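/*
 * Worked example (not part of mm/util.c): with 4 KiB pages
 * (PAGE_SHIFT == 12) the default STACK_RND_MASK is 0x7ff, so
 * random_variable ranges over 0 .. 0x7ff << 12 = 0x7ff000, i.e. just
 * under 8 MiB of stack-top randomisation in 4 KiB steps.
 */
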
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

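/*
 * Worked example (not part of mm/util.c): with the common 8 MiB
 * RLIMIT_STACK, gap + pad stays well below MIN_GAP, so the gap is
 * raised to 128 MiB and mmap_base() ends up at
 * PAGE_ALIGN(STACK_TOP - SZ_128M - rnd); only stack limits between
 * 128 MiB and 5/6 of STACK_TOP are honoured as-is.
 */
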
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

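/*
 * Illustrative sketch, not part of mm/util.c: a driver that pins user
 * pages typically charges them before pinning and un-charges on
 * release. Error handling around the pin itself is elided; names are
 * hypothetical.
 */
static int example_charge_pinned(struct mm_struct *mm, unsigned long npages)
{
	int ret = account_locked_vm(mm, npages, true);

	if (ret)
		return ret;	/* -ENOMEM: RLIMIT_MEMLOCK would be exceeded */

	/* ... pin pages; on failure: account_locked_vm(mm, npages, false) ... */

	return 0;
}
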
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
 * fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node(size, 1, flags, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

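/*
 * Illustrative sketch, not part of mm/util.c: kvmalloc() (the
 * NUMA-agnostic wrapper around kvmalloc_node()) suits tables that are
 * usually small but can grow beyond what the page allocator will hand
 * out contiguously. Names below are hypothetical; array_size() guards
 * the multiplication against overflow.
 */
static int example_alloc_table(void **table, size_t entries, size_t entry_size)
{
	void *tbl = kvmalloc(array_size(entries, entry_size), GFP_KERNEL);

	if (!tbl)
		return -ENOMEM;
	kvfree(*table);		/* correct for both kmalloc and vmalloc memory */
	*table = tbl;
	return 0;
}
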
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

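/*
 * Illustrative sketch, not part of mm/util.c: key material should be
 * wiped before its allocation is returned, which is exactly what
 * kvfree_sensitive() provides. Names below are hypothetical.
 */
static void example_destroy_key(void *key_data, size_t key_len)
{
	/* zeroes key_len bytes (un-optimisable), then kvfree()s */
	kvfree_sensitive(key_data, key_len);
}
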
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, true is returned if any subpage is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP, page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users usually won't switch policies too frequently),
	 * the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

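/*
 * Worked example (not part of mm/util.c): on a machine with 4 GiB of
 * RAM (no hugetlb pages), 2 GiB of swap, and the default
 * sysctl_overcommit_ratio of 50, the OVERCOMMIT_NEVER limit is
 * 4 GiB * 50 / 100 + 2 GiB = 4 GiB of committed address space.
 */
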
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a terminating NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}