/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>

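/*
 * The vDSO is mapped by default; it can be disabled with the "vdso="
 * kernel parameter handled by vdso_setup() at the bottom of this file.
 */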
unsigned int __read_mostly vdso_enabled = 1;

extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

extern struct page *vdso_pages[];
static unsigned vdso_size;

#ifdef CONFIG_X86_X32_ABI
extern char vdsox32_start[], vdsox32_end[];
extern struct page *vdsox32_pages[];
static unsigned vdsox32_size;

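/*
 * Find the .altinstructions section in the x32 vDSO image and apply the
 * alternative-instruction replacements it describes.
 */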
static void __init patch_vdsox32(void *vdso, size_t len)
{
	Elf32_Ehdr *hdr = vdso;
	Elf32_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf32_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf32_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdsox32: .altinstructions not found\n");
	return; /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif

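/* As above, but for the native 64-bit vDSO image. */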
static void __init patch_vdso64(void *vdso, size_t len)
{
	Elf64_Ehdr *hdr = vdso;
	Elf64_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf64_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf64_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warning("patch_vdso64: .altinstructions not found\n");
	return; /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}

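/*
 * Patch the vDSO image(s) and build the page arrays used to map them
 * into processes.  Runs once at boot via subsys_initcall().
 */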
static int __init init_vdso(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	patch_vdso64(vdso_start, vdso_end - vdso_start);

	vdso_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);

#ifdef CONFIG_X86_X32_ABI
	patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
	npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
	vdsox32_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
#endif

	return 0;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory make sure it is still in the same PTE as the stack top.
 * This doesn't give that many random bits.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * Page-align it here so that get_unmapped_area doesn't
	 * align it wrongfully again to the next page.  addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_addr(addr, NULL, ALIGN_VDSO);

	return addr;
}

/*
 * Set up a VMA at program startup for the vsyscall page.
 * Not called for compat tasks.
 */
static int setup_additional_pages(struct linux_binprm *bprm,
				  int uses_interp,
				  struct page **pages,
				  unsigned size)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, size);
	addr = get_unmapped_area(NULL, addr, size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	current->mm->context.vdso = (void *)addr;

	ret = install_special_mapping(mm, addr, size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
				      VM_ALWAYSDUMP,
				      pages);
	if (ret) {
		current->mm->context.vdso = NULL;
		goto up_fail;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

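/*
 * Called by the ELF loader at exec time to map the 64-bit vDSO into the
 * new process.
 */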
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdso_pages,
				      vdso_size);
}

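/* The x32 variant maps the separate x32 vDSO image instead. */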
#ifdef CONFIG_X86_X32_ABI
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
				      vdsox32_size);
}
#endif

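/* Handle the "vdso=" kernel parameter; "vdso=0" disables the mapping. */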
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);