// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in the .rodata section, otherwise it
 * falls back to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

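/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pairs kstrdup_const() with kfree_const(), so that string literals living
 * in .rodata are never copied or freed. The struct and function names below
 * are hypothetical.
 *
 *      struct widget {
 *              const char *name;
 *      };
 *
 *      static int widget_set_name(struct widget *w, const char *name)
 *      {
 *              const char *s = kstrdup_const(name, GFP_KERNEL);
 *
 *              if (!s)
 *                      return -ENOMEM;
 *              kfree_const(w->name);   // safe for .rodata, heap, or NULL
 *              w->name = s;
 *              return 0;
 *      }
 */
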
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

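/*
 * Illustrative sketch (not part of the original file): when the length is
 * already known exactly, kmemdup_nul() avoids the strnlen() walk that
 * kstrndup() performs. The function name below is hypothetical.
 *
 *      // Terminate a counted (non-NUL-terminated) token from a parser.
 *      static char *dup_token(const char *start, size_t tok_len)
 *      {
 *              return kmemdup_nul(start, tok_len, GFP_KERNEL);
 *      }
 */
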
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kvmalloc(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kvfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(vmemdup_user);

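/*
 * Illustrative sketch (not part of the original file): a hypothetical ioctl
 * helper duplicating a user buffer. Small, fixed-size buffers suit
 * memdup_user() (physically contiguous, freed with kfree()); potentially
 * large ones suit vmemdup_user(), which must be released with kvfree().
 *
 *      static long fill_from_user(const void __user *ubuf, size_t len)
 *      {
 *              void *buf = vmemdup_user(ubuf, len);
 *
 *              if (IS_ERR(buf))
 *                      return PTR_ERR(buf);
 *              // ... operate on buf ...
 *              kvfree(buf);
 *              return 0;
 *      }
 */
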
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);

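/*
 * Illustrative sketch (not part of the original file): copying a bounded,
 * user-supplied string, e.g. from a hypothetical syscall argument. Note
 * that strndup_user() returns an ERR_PTR(), never NULL, so callers test
 * with IS_ERR().
 *
 *      static int set_label(const char __user *ulabel)
 *      {
 *              char *label = strndup_user(ulabel, PAGE_SIZE);
 *
 *              if (IS_ERR(label))
 *                      return PTR_ERR(label);
 *              // ... use label ...
 *              kfree(label);
 *              return 0;
 *      }
 */
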
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_sem is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
                        struct task_struct *task, bool bypass_rlim)
{
        unsigned long locked_vm, limit;
        int ret = 0;

        lockdep_assert_held_write(&mm->mmap_sem);

        locked_vm = mm->locked_vm;
        if (inc) {
                if (!bypass_rlim) {
                        limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                        if (locked_vm + pages > limit)
                                ret = -ENOMEM;
                }
                if (!ret)
                        mm->locked_vm = locked_vm + pages;
        } else {
                WARN_ON_ONCE(pages > locked_vm);
                mm->locked_vm = locked_vm - pages;
        }

        pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
                 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
                 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
                 ret ? " - exceeded" : "");

        return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
        int ret;

        if (pages == 0 || !mm)
                return 0;

        down_write(&mm->mmap_sem);
        ret = __account_locked_vm(mm, pages, inc, current,
                                  capable(CAP_IPC_LOCK));
        up_write(&mm->mmap_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

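/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver that pins user pages charges them against RLIMIT_MEMLOCK before
 * pinning and uncharges them on teardown.
 *
 *      static int pin_charge(struct mm_struct *mm, unsigned long npages)
 *      {
 *              return account_locked_vm(mm, npages, true);
 *      }
 *
 *      static void pin_uncharge(struct mm_struct *mm, unsigned long npages)
 *      {
 *              account_locked_vm(mm, npages, false);
 *      }
 */
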
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (down_write_killable(&mm->mmap_sem))
                        return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate, &uf);
                up_write(&mm->mmap_sem);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

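/*
 * Illustrative sketch (not part of the original file): vm_mmap() is the
 * in-kernel analogue of mmap(2). A hypothetical caller mapping a file
 * read-only; errors come back encoded in the returned address:
 *
 *      unsigned long addr;
 *
 *      addr = vm_mmap(file, 0, size, PROT_READ, MAP_PRIVATE, 0);
 *      if (IS_ERR_VALUE(addr))
 *              return (long)addr;
 */
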
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL will never
 * fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
         * so the given set of flags has to be compatible.
         */
        if ((flags & GFP_KERNEL) != GFP_KERNEL)
                return kmalloc_node(size, flags, node);

        /*
         * We want to attempt a large physically contiguous block first because
         * it is less likely to fragment multiple larger blocks and therefore
         * contribute to a long term fragmentation less than vmalloc fallback.
         * However make sure that larger requests are not too disruptive - no
         * OOM killer and no allocation failure warnings as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
                        kmalloc_flags |= __GFP_NORETRY;
        }

        ret = kmalloc_node(size, kmalloc_flags, node);

        /*
         * It doesn't really make sense to fall back to vmalloc for sub page
         * requests
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        return __vmalloc_node_flags_caller(size, node, flags,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);

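/*
 * Illustrative sketch (not part of the original file): a table whose size
 * may be either tiny or huge is a typical kvmalloc user; the caller need
 * not remember which allocator satisfied the request, because kvfree()
 * dispatches on the address. "struct entry" and the function name are
 * hypothetical.
 *
 *      static int build_table(size_t nents)
 *      {
 *              struct entry *tbl = kvmalloc_array(nents, sizeof(*tbl),
 *                                                 GFP_KERNEL | __GFP_ZERO);
 *
 *              if (!tbl)
 *                      return -ENOMEM;
 *              // ... populate tbl ...
 *              kvfree(tbl);
 *              return 0;
 *      }
 */
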
static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, it returns true if any subpage is mapped.
 */
bool page_mapped(struct page *page)
{
        int i;

        if (likely(!PageCompound(page)))
                return atomic_read(&page->_mapcount) >= 0;
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
        if (PageHuge(page))
                return false;
        for (i = 0; i < (1 << compound_order(page)); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return NULL;
        return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP, page->_mapcount contains the total number of
         * mappings of the page: no need to look into compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages() - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}

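/*
 * Worked example (illustrative, assuming default sysctls): with
 * overcommit_ratio = 50, 8 GiB of RAM, no hugetlb pages and 2 GiB of swap,
 * the OVERCOMMIT_NEVER limit is 8 GiB * 50% + 2 GiB = 6 GiB of committed
 * address space (computed in pages).
 */
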
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline-shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long allowed;

        VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
                        -(s64)vm_committed_as_batch * num_online_cpus(),
                        "memory commitment underflow");

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                if (pages > totalram_pages() + total_swap_pages)
                        goto error;
                return 0;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;

        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        spin_lock(&mm->arg_lock);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        spin_unlock(&mm->arg_lock);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}