// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/vsyscall/vsyscall.c
 *
 *  Copyright (C) 2006 Paul Mundt
 *
 * vDSO randomization
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 */
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/sched.h>
#include <linux/err.h>

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
EXPORT_SYMBOL_GPL(vdso_enabled);

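/* Boot with "vdso=0" to turn this off (see the comment above vdso_enabled) */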
static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("vdso=", vdso_setup);

/*
 * These symbols are defined by vsyscall.o to mark the bounds
 * of the ELF DSO images included therein.
 */
extern const char vsyscall_trapa_start, vsyscall_trapa_end;
static struct page *syscall_pages[1];
static struct vm_special_mapping vdso_mapping = {
        .name = "[vdso]",
        .pages = syscall_pages,
};

int __init vsyscall_init(void)
{
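        /* Allocate a zeroed page to hold the vDSO image */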
        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
        syscall_pages[0] = virt_to_page(syscall_page);

        /*
         * XXX: Map this page to a fixmap entry if we get around
         * to adding the page to ELF core dumps
         */

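        /*
         * Copy the vDSO image, bounded by the vsyscall_trapa_start/end
         * symbols above, into the freshly allocated page.
         */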
        memcpy(syscall_page,
               &vsyscall_trapa_start,
               &vsyscall_trapa_end - &vsyscall_trapa_start);

        return 0;
}

/* Set up a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

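        /* Let get_unmapped_area() pick a base address for the single vDSO page */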
        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

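        /* Install the page as a read/exec special mapping named "[vdso]" */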
        vdso_mapping.pages = syscall_pages;
        vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                                      VM_READ | VM_EXEC |
                                      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
                                      &vdso_mapping);
        ret = PTR_ERR(vma);
        if (IS_ERR(vma))
                goto up_fail;

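        /* Stash the base address so it can be passed down to userspace on exec() */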
        current->mm->context.vdso = (void *)addr;
        ret = 0;

up_fail:
        mmap_write_unlock(mm);
        return ret;
}

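/* Report the vsyscall page as "[vdso]" in /proc/<pid>/maps */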
const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";

        return NULL;
}