#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
        return addr >= (unsigned long)__start_rodata &&
                addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise it
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed by kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

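/*
 * Illustrative usage sketch (not part of the original file): a common
 * pattern is naming an object from a caller-supplied string that is
 * often a literal living in .rodata. The 'struct foo' type and the
 * functions below are hypothetical; kfree_const() is a no-op for
 * .rodata strings, so both literal and heap names are handled.
 *
 *      static int foo_set_name(struct foo *f, const char *name)
 *      {
 *              f->name = kstrdup_const(name, GFP_KERNEL);
 *              if (!f->name)
 *                      return -ENOMEM;
 *              return 0;
 *      }
 *
 *      static void foo_release(struct foo *f)
 *      {
 *              kfree_const(f->name);
 *      }
 */
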
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len+1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

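/*
 * Illustrative sketch (not part of the original file): kmemdup() is the
 * idiomatic replacement for an open-coded kmalloc() + memcpy() pair,
 * e.g. cloning a template structure. The 'struct foo' type and the
 * 'template' pointer are hypothetical.
 *
 *      struct foo *clone = kmemdup(template, sizeof(*template),
 *                                  GFP_KERNEL);
 *      if (!clone)
 *              return -ENOMEM;
 */
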
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);

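/*
 * Illustrative sketch (not part of the original file): the typical
 * copy-in pattern in an ioctl handler. 'struct foo_args' and 'uptr'
 * are hypothetical names.
 *
 *      struct foo_args *args = memdup_user(uptr, sizeof(*args));
 *
 *      if (IS_ERR(args))
 *              return PTR_ERR(args);
 *      ... use args ...
 *      kfree(args);
 */
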
/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);

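/*
 * Illustrative sketch (not part of the original file): syscall paths
 * often copy a bounded user string this way, e.g. a path-sized string
 * capped at PATH_MAX. 'uname' is a hypothetical user pointer.
 *
 *      char *kname = strndup_user(uname, PATH_MAX);
 *
 *      if (IS_ERR(kname))
 *              return PTR_ERR(kname);
 *      ... use kname ...
 *      kfree(kname);
 */
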
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);

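/*
 * Illustrative sketch (not part of the original file): memdup_user_nul()
 * suits write() handlers that want to parse the user buffer as a C
 * string. The handler shape and names below are hypothetical.
 *
 *      static ssize_t foo_write(struct file *file, const char __user *buf,
 *                               size_t count, loff_t *ppos)
 *      {
 *              char *kbuf = memdup_user_nul(buf, count);
 *
 *              if (IS_ERR(kbuf))
 *                      return PTR_ERR(kbuf);
 *              ... parse kbuf, e.g. with kstrtoul() ...
 *              kfree(kbuf);
 *              return count;
 *      }
 */
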
/*
 * Link @vma into @mm's VMA list after @prev, or at the list head when
 * @prev is NULL, using @rb_parent to locate the successor in that case.
 */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
                                struct vm_area_struct *vma)
{
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is true, check in the entire thread group or else
 * just check in the current task. Returns the task_struct of the task
 * that the vma is stack for. Must be called under rcu_read_lock().
 */
struct task_struct *task_of_stack(struct task_struct *task,
                                struct vm_area_struct *vma, bool in_group)
{
        if (vm_is_stack_for_task(task, vma))
                return task;

        if (in_group) {
                struct task_struct *t;

                for_each_thread(task, t) {
                        if (vm_is_stack_for_task(t, vma))
                                return t;
                }
        }

        return NULL;
}

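/*
 * Illustrative sketch (not part of the original file): callers must
 * hold rcu_read_lock() across the thread-group walk, e.g.:
 *
 *      struct task_struct *t;
 *
 *      rcu_read_lock();
 *      t = task_of_stack(task, vma, true);
 *      if (t)
 *              ... the vma is t's stack ...
 *      rcu_read_unlock();
 */
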
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:      starting user address
 * @nr_pages:   number of pages from start to pin
 * @write:      whether pages will be written to
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        struct mm_struct *mm = current->mm;
        return get_user_pages_unlocked(current, mm, start, nr_pages,
                                       write, 0, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);

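/*
 * Illustrative sketch (not part of the original file): a caller pins a
 * user buffer, operates on the pages, then drops the references with
 * put_page(). 'uaddr' and the array size are hypothetical.
 *
 *      struct page *pages[16];
 *      int i, nr = get_user_pages_fast(uaddr, 16, 1, pages);
 *
 *      if (nr <= 0)
 *              return nr ? nr : -EFAULT;
 *      ... access the pinned pages, e.g. via kmap() ...
 *      for (i = 0; i < nr; i++)
 *              put_page(pages[i]);
 */
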
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                down_write(&mm->mmap_sem);
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate);
                up_write(&mm->mmap_sem);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

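/*
 * Illustrative sketch (not part of the original file): in-kernel users
 * such as binary loaders map a file region with vm_mmap(). The size
 * and protection values here are hypothetical.
 *
 *      unsigned long map = vm_mmap(file, 0, PAGE_ALIGN(size),
 *                                  PROT_READ, MAP_PRIVATE, 0);
 *
 *      if (IS_ERR_VALUE(map))
 *              return (int)map;
 */
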
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);

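/*
 * Illustrative sketch (not part of the original file): kvfree() pairs
 * with the kmalloc-with-vmalloc-fallback allocation pattern, so the
 * free path need not remember which allocator succeeded.
 *
 *      void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *
 *      if (!buf)
 *              buf = vmalloc(size);
 *      if (!buf)
 *              return -ENOMEM;
 *      ... use buf ...
 *      kvfree(buf);
 */
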
static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
                return NULL;
        return mapping;
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}

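/*
 * Worked example (illustrative, not part of the original file): with
 * 4 KiB pages, PAGE_SHIFT - 10 == 2, so sysctl_overcommit_kbytes of
 * 1048576 (1 GiB) yields 1048576 >> 2 == 262144 pages. On the ratio
 * path instead, 1000000 non-hugetlb RAM pages with a 50% ratio allow
 * 500000 pages. total_swap_pages is added in either case.
 */
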
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;

        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        down_read(&mm->mmap_sem);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, 0);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len, 0);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}

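/*
 * Illustrative sketch (not part of the original file): a debugging
 * helper dumping a task's command line. The 128-byte buffer is an
 * arbitrary choice; the explicit terminator is needed because the
 * copy is not guaranteed to be NUL-terminated, and since arguments
 * are NUL-separated, %s prints only the first one.
 *
 *      char buf[128];
 *      int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *      buf[n] = '\0';
 *      pr_info("cmdline: %s\n", buf);
 */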