// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section otherwise
 * fallback to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

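/*
 * Illustrative usage sketch (not part of the original file): a typical
 * consumer stores a name that is usually a string literal, so
 * kstrdup_const() can avoid the copy for .rodata strings. The matching
 * free must then be kfree_const(). The struct and function names below
 * are hypothetical.
 */
struct example_obj {
	const char *name;
};

static int __maybe_unused example_set_name(struct example_obj *obj,
					   const char *name)
{
	obj->name = kstrdup_const(name, GFP_KERNEL);
	return obj->name ? 0 : -ENOMEM;
}

static void __maybe_unused example_release_name(struct example_obj *obj)
{
	kfree_const(obj->name);	/* no-op for .rodata, kfree() otherwise */
}
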
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup_noprof);

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);

/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may be not physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

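/*
 * Illustrative usage sketch (not part of the original file): kmemdup_nul()
 * is the right tool when a parser holds a length-delimited token inside a
 * larger buffer and needs a regular C string. The function name is
 * hypothetical.
 */
static char * __maybe_unused example_token_to_string(const char *buf,
						     size_t token_len)
{
	/* allocates token_len + 1 bytes and NUL-terminates the copy */
	return kmemdup_nul(buf, token_len, GFP_KERNEL);
}
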
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may be not
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

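/*
 * Illustrative usage sketch (not part of the original file): an
 * ioctl-style copy-in of a caller-sized buffer. Note the ERR_PTR()
 * convention - the result is tested with IS_ERR(), never against NULL -
 * and the kvfree() pairing. The function name is hypothetical.
 */
static long __maybe_unused example_copy_in(const void __user *uptr,
					   size_t len)
{
	void *buf = vmemdup_user(uptr, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */
	/* ... operate on buf ... */
	kvfree(buf);
	return 0;
}
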
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

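/*
 * Illustrative usage sketch (not part of the original file): the classic
 * procfs/debugfs write() helper pattern, where the user buffer is not
 * NUL-terminated but string parsing follows. The function name is
 * hypothetical.
 */
static ssize_t __maybe_unused example_write(const char __user *ubuf,
					    size_t count)
{
	char *kbuf = memdup_user_nul(ubuf, count);
	int val, ret;

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);
	ret = kstrtoint(strstrip(kbuf), 0, &val);
	kfree(kbuf);
	return ret ? ret : count;
}
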
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start: The smallest acceptable address the caller will take.
 * @range: The size of the area, starting at @start, within which the
 *         random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

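/*
 * Illustrative worked example (not part of the original file): with 4K
 * pages, randomize_page(0x1000, 0x10000) keeps start as-is (already
 * aligned), range becomes 16 pages after the shift, and the result is one
 * of the 16 page-aligned addresses in [0x1000, 0x11000). A caller wanting
 * jitter above a base address might do (function name hypothetical):
 */
static unsigned long __maybe_unused example_jittered_base(unsigned long base)
{
	/* up to 32MB of page-aligned randomization above base */
	return randomize_page(base, SZ_32M);
}
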
#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* On parisc the stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
		!IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task. mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

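/*
 * Illustrative usage sketch (not part of the original file): a driver
 * that pins user pages charges them against RLIMIT_MEMLOCK before
 * pinning and reverses the charge when the pin fails or is undone. The
 * function name and the pinning step are hypothetical.
 */
static int __maybe_unused example_charge_and_pin(struct mm_struct *mm,
						 unsigned long nr_pages)
{
	/* takes mmap_lock itself; fails with -ENOMEM over the rlimit */
	int ret = account_locked_vm(mm, nr_pages, true);

	if (ret)
		return ret;
	/* ... pin the pages; on failure undo the accounting with: ... */
	/* account_locked_vm(mm, nr_pages, false); */
	return 0;
}
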
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, and neither is the __GFP_NORETRY
 * modifier. __GFP_RETRY_MAYFAIL is supported, and it should be used only if
 * kmalloc is preferable to the vmalloc fallback, due to visible performance
 * drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node_noprof(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node_noprof);

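/*
 * Illustrative usage sketch (not part of the original file): the common
 * pattern for a large table whose physical contiguity does not matter.
 * kvmalloc_array() tries kmalloc() first and transparently falls back to
 * vmalloc(); kvfree() works for either outcome. Names are hypothetical.
 */
static u64 * __maybe_unused example_alloc_table(size_t nr_entries)
{
	u64 *table = kvmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;
	/* ... fill and use the table; the caller frees with kvfree() ... */
	return table;
}
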
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc_noprof(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc_noprof);

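/*
 * Illustrative usage sketch (not part of the original file): growing a
 * kvmalloc'ed buffer. Unlike krealloc(), the old size must be passed in
 * because the allocation may live in vmalloc space. On failure the old
 * buffer is left intact and still owned by the caller. The function name
 * is hypothetical.
 */
static void * __maybe_unused example_grow(void *buf, size_t old_size,
					  size_t new_size)
{
	void *bigger = kvrealloc(buf, old_size, new_size, GFP_KERNEL);

	if (!bigger)
		kvfree(buf);	/* this sketch simply gives up */
	return bigger;
}
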
/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);

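/*
 * Illustrative usage sketch (not part of the original file): vcalloc()
 * gives an overflow-checked, zeroed, virtually contiguous array. The
 * record type and function name are hypothetical.
 */
struct example_rec {
	u64 key;
	u64 val;
};

static struct example_rec * __maybe_unused example_alloc_recs(size_t n)
{
	/* n * sizeof(*recs) is overflow-checked; memory comes back zeroed */
	struct example_rec *recs = vcalloc(n, sizeof(*recs));

	/* free with vfree(recs), or kvfree() which handles it as well */
	return recs;
}
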
struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid possible race condition (even
	 * though user usually won't too frequently do the switching to policy
	 * OVERCOMMIT_NEVER), the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

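/*
 * Illustrative worked example (not part of the original file): on a
 * machine with 8 GiB of RAM, no hugetlb pages, the default
 * overcommit_ratio of 50 and 2 GiB of swap, OVERCOMMIT_NEVER permits
 * 8 * 50/100 + 2 = 6 GiB of committed address space. Setting
 * sysctl_overcommit_kbytes replaces the ratio-based term entirely.
 */
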
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;
	unsigned long bytes_failed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	bytes_failed = pages << PAGE_SHIFT;
	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
			    __func__, current->pid, current->comm, bytes_failed);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

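/*
 * Illustrative usage sketch (not part of the original file): because
 * mem_dump_obj() continues the current line with pr_cont(), the caller
 * prints its preamble without a trailing newline first. The function
 * name is hypothetical.
 */
static void __maybe_unused example_report_bad_obj(void *obj)
{
	pr_err("example: unexpected object at %px:", obj);
	mem_dump_obj(obj);	/* appends provenance and the newline */
}
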
/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

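/*
 * Illustrative usage sketch (not part of the original file): the PFN
 * walker side of the protocol above. While the rwsem is read-held via
 * page_offline_freeze(), no page can newly become PageOffline(), so the
 * check stays stable across the read. Assumes pfn_to_online_page() is
 * available; the function name is hypothetical.
 */
static void __maybe_unused example_walk_pfn(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);

	if (!page)
		return;

	page_offline_freeze();
	if (!PageOffline(page)) {
		/* ... safe to read the page contents here ... */
	}
	page_offline_thaw();
}
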
#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif