// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <kunit/visibility.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise
 * fall back to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup_noprof);

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
        return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);

/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may not be physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kvmalloc(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

static kmem_buckets *user_buckets __ro_after_init;

static int __init init_user_buckets(void)
{
        user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);

        return 0;
}
subsys_initcall(init_user_buckets);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kvfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);

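/*
 * Illustrative sketch (not part of this file): copying a user-supplied
 * name with a hard upper bound, as a syscall or ioctl handler might
 * (the variable names are hypothetical):
 *
 *      char *name = strndup_user(uname, PATH_MAX);
 *
 *      if (IS_ERR(name))
 *              return PTR_ERR(name);
 *      // ... use name, then kfree(name) ...
 *
 * A string of exactly @n bytes including the NUL is accepted; anything
 * longer yields -EINVAL, and an unreadable @s yields -EFAULT.
 */
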
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause pagefault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
        /* Changing an anonymous vma with this is illegal */
        get_file(file);
        swap(vma->vm_file, file);
        fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
        unsigned long random_variable = 0;

        if (current->flags & PF_RANDOMIZE) {
                random_variable = get_random_long();
                random_variable &= STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
#ifdef CONFIG_STACK_GROWSUP
        return PAGE_ALIGN(stack_top) + random_variable;
#else
        return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:  The smallest acceptable address the caller will take.
 * @range:  The size of the area, starting at @start, within which the
 *          random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
        if (!PAGE_ALIGNED(start)) {
                range -= PAGE_ALIGN(start) - start;
                start = PAGE_ALIGN(start);
        }

        if (start > ULONG_MAX - range)
                range = ULONG_MAX - start;

        range >>= PAGE_SHIFT;

        if (range == 0)
                return start;

        return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
        /* Is the current task 32bit ? */
        if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
                return randomize_page(mm->brk, SZ_32M);

        return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
        unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
        if (is_compat_task())
                rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
        else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
                rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

        return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        /* On parisc the stack always grows up - so an unlimited stack should
         * not be an indicator to use the legacy memory layout. */
        if (rlim_stack->rlim_cur == RLIM_INFINITY &&
            !IS_ENABLED(CONFIG_STACK_GROWSUP))
                return 1;

        return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP         (SZ_128M)
#define MAX_GAP         (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
        /*
         * For an upwards growing stack the calculation is much simpler.
         * Memory for the maximum stack size is reserved at the top of the
         * task.  mmap_base starts directly below the stack and grows
         * downwards.
         */
        return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
        unsigned long gap = rlim_stack->rlim_cur;
        unsigned long pad = stack_guard_gap;

        /* Account for stack randomization if necessary */
        if (current->flags & PF_RANDOMIZE)
                pad += (STACK_RND_MASK << PAGE_SHIFT);

        /* Values close to RLIM_INFINITY can overflow. */
        if (gap + pad > gap)
                gap += pad;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                clear_bit(MMF_TOPDOWN, &mm->flags);
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
                set_bit(MMF_TOPDOWN, &mm->flags);
        }
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif
#ifdef CONFIG_MMU
EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
                        struct task_struct *task, bool bypass_rlim)
{
        unsigned long locked_vm, limit;
        int ret = 0;

        mmap_assert_write_locked(mm);

        locked_vm = mm->locked_vm;
        if (inc) {
                if (!bypass_rlim) {
                        limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                        if (locked_vm + pages > limit)
                                ret = -ENOMEM;
                }
                if (!ret)
                        mm->locked_vm = locked_vm + pages;
        } else {
                WARN_ON_ONCE(pages > locked_vm);
                mm->locked_vm = locked_vm - pages;
        }

        pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
                 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
                 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
                 ret ? " - exceeded" : "");

        return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
        int ret;

        if (pages == 0 || !mm)
                return 0;

        mmap_write_lock(mm);
        ret = __account_locked_vm(mm, pages, inc, current,
                                  capable(CAP_IPC_LOCK));
        mmap_write_unlock(mm);

        return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (mmap_write_lock_killable(mm))
                        return -EINTR;
                ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
                              &uf);
                mmap_write_unlock(mm);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @b: which set of kmalloc buckets to allocate from.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * We want to attempt a large physically contiguous block first because
         * it is less likely to fragment multiple larger blocks and therefore
         * contribute to a long term fragmentation less than vmalloc fallback.
         * However make sure that larger requests are not too disruptive - no
         * OOM killer and no allocation failure warnings as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
                        kmalloc_flags |= __GFP_NORETRY;

                /* nofail semantic is implemented by the vmalloc fallback */
                kmalloc_flags &= ~__GFP_NOFAIL;
        }

        ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b), kmalloc_flags, node);

        /*
         * It doesn't really make sense to fall back to vmalloc for sub page
         * requests
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        /* non-sleeping allocations are not supported by vmalloc */
        if (!gfpflags_allow_blocking(flags))
                return NULL;

        /* Don't even allow crazy sizes */
        if (unlikely(size > INT_MAX)) {
                WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
        }

        /*
         * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
         * since the callers already cannot assume anything
         * about the resulting pointer, and cannot play
         * protection games.
         */
        return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
                        flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
                        node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kvmalloc_node_noprof);

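/*
 * Illustrative sketch (not part of this file): call sites normally use the
 * plain kvmalloc()/kvfree() wrappers -- the bucket/noprof plumbing above is
 * hidden behind them:
 *
 *      void *buf = kvmalloc(nbytes, GFP_KERNEL);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      // ... buf may be slab memory or vmalloc memory ...
 *      kvfree(buf);
 *
 * Callers must not assume the result is physically contiguous or pass it
 * to helpers that require that (e.g. for DMA); kvfree() picks the right
 * deallocator either way.
 */
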
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
        if (likely(!ZERO_OR_NULL_PTR(addr))) {
                memzero_explicit((void *)addr, len);
                kvfree(addr);
        }
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
        void *newp;

        if (oldsize >= newsize)
                return (void *)p;
        newp = kvmalloc_noprof(newsize, flags);
        if (!newp)
                return NULL;
        memcpy(newp, p, oldsize);
        kvfree(p);
        return newp;
}
EXPORT_SYMBOL(kvrealloc_noprof);

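/*
 * Illustrative sketch (not part of this file): growing a kvmalloc'ed
 * buffer via the kvrealloc() wrapper. Unlike krealloc(), the old buffer's
 * true allocated size is not known here, so @oldsize must be passed in by
 * the caller (variable names hypothetical):
 *
 *      new = kvrealloc(buf, old_len, new_len, GFP_KERNEL);
 *      if (!new)
 *              return -ENOMEM; // buf is still valid on failure
 *      buf = new;
 *
 * On success the old buffer has been freed; on failure it is left
 * untouched, mirroring the krealloc() convention.
 */
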
/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
        size_t bytes;

        if (unlikely(check_mul_overflow(n, size, &bytes)))
                return NULL;
        return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array_noprof(size_t n, size_t size)
{
        return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
        return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc_noprof(size_t n, size_t size)
{
        return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);

/* Return the anon_vma of an anonymous folio, or NULL if it is not anonymous. */
struct anon_vma *folio_anon_vma(struct folio *folio)
{
        unsigned long mapping = (unsigned long)folio->mapping;

        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
        struct address_space *mapping;

        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(folio_test_slab(folio)))
                return NULL;

        if (unlikely(folio_test_swapcache(folio)))
                return swap_address_space(folio->swap);

        mapping = folio->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
                return NULL;

        return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
        long i = 0;
        long nr = folio_nr_pages(src);

        for (;;) {
                copy_highpage(folio_page(dst, i), folio_page(src, i));
                if (++i == nr)
                        break;
                cond_resched();
        }
}
EXPORT_SYMBOL(folio_copy);

/*
 * Machine-check-safe variant of folio_copy(): returns -EHWPOISON if a
 * poisoned page is encountered while copying, 0 otherwise.
 */
int folio_mc_copy(struct folio *dst, struct folio *src)
{
        long nr = folio_nr_pages(src);
        long i = 0;

        for (;;) {
                if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
                        return -EHWPOISON;
                if (++i == nr)
                        break;
                cond_resched();
        }

        return 0;
}
EXPORT_SYMBOL(folio_mc_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
        percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        struct ctl_table t;
        int new_policy = -1;
        int ret;

        /*
         * The deviation of sync_overcommit_as could be big with loose policy
         * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
         * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
         * with the strict "NEVER", and to avoid a possible race condition
         * (even though users usually won't switch to OVERCOMMIT_NEVER
         * frequently), the switch is done in the following order:
         *      1. changing the batch
         *      2. sync percpu count on each CPU
         *      3. switch the policy
         */
        if (write) {
                t = *table;
                t.data = &new_policy;
                ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
                if (ret || new_policy == -1)
                        return ret;

                mm_compute_batch(new_policy);
                if (new_policy == OVERCOMMIT_NEVER)
                        schedule_on_each_cpu(sync_overcommit_as);
                sysctl_overcommit_memory = new_policy;
        } else {
                ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        }

        return ret;
}

int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
                size_t *lenp, loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages() - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}

/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long allowed;
        unsigned long bytes_failed;

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                if (pages > totalram_pages() + total_swap_pages)
                        goto error;
                return 0;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        bytes_failed = pages << PAGE_SHIFT;
        pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
                            __func__, current->pid, current->comm, bytes_failed);
        vm_unacct_memory(pages);

        return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        spin_lock(&mm->arg_lock);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        spin_unlock(&mm->arg_lock);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}

/* Compare the contents of two pages; returns a memcmp()-style result. */
int __weak memcmp_pages(struct page *page1, struct page *page2)
{
        char *addr1, *addr2;
        int ret;

        addr1 = kmap_local_page(page1);
        addr2 = kmap_local_page(page2);
        ret = memcmp(addr1, addr2, PAGE_SIZE);
        kunmap_local(addr2);
        kunmap_local(addr1);
        return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
        const char *type;

        if (kmem_dump_obj(object))
                return;

        if (vmalloc_dump_obj(object))
                return;

        if (is_vmalloc_addr(object))
                type = "vmalloc memory";
        else if (virt_addr_valid(object))
                type = "non-slab/vmalloc memory";
        else if (object == NULL)
                type = "NULL pointer";
        else if (object == ZERO_SIZE_PTR)
                type = "zero-size pointer";
        else
                type = "non-paged memory";

        pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
        down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
        up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
        down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
        up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
        long i, nr = folio_nr_pages(folio);

        for (i = 0; i < nr; i++)
                flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif