[linux.git] / mm / util.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section otherwise
 * fallback to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

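/*
 * Illustrative usage sketch (hypothetical caller, names are examples only):
 * kstrdup_const() is typically paired with kfree_const(), so that strings
 * living in .rodata are neither copied nor freed:
 *
 *	const char *name = kstrdup_const(src, GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 */
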
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

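/*
 * Illustrative usage sketch (hypothetical caller): both helpers return an
 * ERR_PTR() on failure rather than NULL, so callers check with IS_ERR();
 * a buffer from vmemdup_user() must be released with kvfree():
 *
 *	void *buf = vmemdup_user(uptr, len);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kvfree(buf);
 */
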
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

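/*
 * Worked example (illustrative, assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * the default STACK_RND_MASK is 0x7ff, so random_variable is at most
 * 0x7ff << 12 = 0x7ff000 bytes, i.e. just under 8 MiB of stack-top
 * randomisation, matching the "8MB of VA" note above.
 */
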
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

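/*
 * Illustrative usage sketch (hypothetical driver, npages is an example
 * variable): pinned user pages are charged against RLIMIT_MEMLOCK on the
 * way in and uncharged on the way out:
 *
 *	ret = account_locked_vm(current->mm, npages, true);
 *	if (ret)
 *		return ret;
 *	...
 *	account_locked_vm(current->mm, npages, false);
 */
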
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

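/*
 * Illustrative usage sketch (hypothetical caller): kvmalloc() tries kmalloc()
 * first and falls back to vmalloc(), so the buffer may or may not be
 * physically contiguous and must be freed with kvfree():
 *
 *	void *tbl = kvmalloc(size, GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */
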
/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);

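/*
 * Illustrative usage sketch (hypothetical caller, struct entry and
 * nr_entries are examples only): vcalloc() checks the n * size
 * multiplication for overflow before allocating, so it is the safer
 * spelling for array allocations:
 *
 *	struct entry *table = vcalloc(nr_entries, sizeof(*table));
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 */
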
/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	return folio_raw_mapping(page_folio(page));
}

/**
 * folio_mapped - Is this folio mapped into userspace?
 * @folio: The folio.
 *
 * Return: True if any page in this folio is referenced by user page tables.
 */
bool folio_mapped(struct folio *folio)
{
	long i, nr;

	if (!folio_test_large(folio))
		return atomic_read(&folio->_mapcount) >= 0;
	if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
		return true;
	if (folio_test_hugetlb(folio))
		return false;

	nr = folio_nr_pages(folio);
	for (i = 0; i < nr; i++) {
		if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(folio_mapped);

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio_swap_entry(folio));

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(folio_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP, page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

/**
 * folio_mapcount() - Calculate the number of mappings of this folio.
 * @folio: The folio.
 *
 * A large folio tracks both how many times the entire folio is mapped,
 * and how many times each individual page in the folio is mapped.
 * This function calculates the total number of times the folio is
 * mapped.
 *
 * Return: The number of times this folio is mapped.
 */
int folio_mapcount(struct folio *folio)
{
	int i, compound, nr, ret;

	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) + 1;

	compound = folio_entire_mapcount(folio);
	nr = folio_nr_pages(folio);
	if (folio_test_hugetlb(folio))
		return compound;
	ret = compound;
	for (i = 0; i < nr; i++)
		ret += atomic_read(&folio_page(folio, i)->_mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!folio_test_anon(folio))
		return ret - compound * nr;
	if (folio_test_double_map(folio))
		ret -= nr;
	return ret;
}

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with a loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing the policy to
	 * the strict OVERCOMMIT_NEVER, we need to reduce the deviation to
	 * comply with the strict "NEVER", and to avoid a possible race
	 * condition (even though users usually won't switch to
	 * OVERCOMMIT_NEVER very often), the switch is done in the following
	 * order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

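/*
 * Worked example (illustrative numbers): with 8 GiB of RAM, no hugetlb
 * pages, the default overcommit_ratio of 50 and 2 GiB of swap, the
 * OVERCOMMIT_NEVER limit is 8 GiB * 50% + 2 GiB = 6 GiB, returned in pages.
 */
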
/*
 * Make sure vm_committed_as is in a cacheline of its own, not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

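/*
 * Worked example (illustrative numbers): with the default
 * user_reserve_kbytes of 128 MiB, a process whose total_vm is 1 GiB keeps
 * min(1 GiB / 32, 128 MiB) = 32 MiB of the commit limit in reserve, so the
 * user still has some headroom to recover, e.g. by starting a shell to kill
 * the offending process.
 */
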
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_valid_obj(object)) {
		kmem_dump_obj(object);
		return;
	}

	if (vmalloc_dump_obj(object))
		return;

	if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif