arch/riscv/kernel/vdso.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 */

#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>

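/* Page offsets of the data pages within the vvar region. */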
enum vvar_pages {
        VVAR_DATA_PAGE_OFFSET,
        VVAR_TIMENS_PAGE_OFFSET,
        VVAR_NR_PAGES,
};

enum rv_vdso_map {
        RV_VDSO_MAP_VVAR,
        RV_VDSO_MAP_VDSO,
};

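/* The vvar region spans VVAR_NR_PAGES pages. */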
#define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)

/*
 * The vDSO data page.
 */
static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

struct __vdso_info {
        const char *name;
        const char *vdso_code_start;
        const char *vdso_code_end;
        unsigned long vdso_pages;
        /* Data Mapping */
        struct vm_special_mapping *dm;
        /* Code Mapping */
        struct vm_special_mapping *cm;
};

static struct __vdso_info vdso_info;
#ifdef CONFIG_COMPAT
static struct __vdso_info compat_vdso_info;
#endif

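/*
 * Track the new location of the vDSO text after an mremap() so the kernel
 * can still find it (e.g. for the signal return trampoline lookup).
 */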
static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *new_vma)
{
        current->mm->context.vdso = (void *)new_vma->vm_start;

        return 0;
}

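/*
 * Validate the vDSO image and build the list of struct pages backing its
 * text, so the code mapping can later be installed into each process.
 */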
static void __init __vdso_init(struct __vdso_info *vdso_info)
{
        unsigned int i;
        struct page **vdso_pagelist;
        unsigned long pfn;

        if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4))
                panic("vDSO is not a valid ELF object!\n");

        vdso_info->vdso_pages = (
                vdso_info->vdso_code_end -
                vdso_info->vdso_code_start) >>
                PAGE_SHIFT;

        vdso_pagelist = kcalloc(vdso_info->vdso_pages,
                                sizeof(struct page *),
                                GFP_KERNEL);
        if (vdso_pagelist == NULL)
                panic("vDSO kcalloc failed!\n");

        /* Grab the vDSO code pages. */
        pfn = sym_to_pfn(vdso_info->vdso_code_start);

        for (i = 0; i < vdso_info->vdso_pages; i++)
                vdso_pagelist[i] = pfn_to_page(pfn + i);

        vdso_info->cm->pages = vdso_pagelist;
}

#ifdef CONFIG_TIME_NS
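/* Hook for the generic time-namespace code to find vdso_data in a vvar page. */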
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
        return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        mmap_read_lock(mm);

        for_each_vma(vmi, vma) {
                if (vma_is_special_mapping(vma, vdso_info.dm))
                        zap_vma_pages(vma);
#ifdef CONFIG_COMPAT
                if (vma_is_special_mapping(vma, compat_vdso_info.dm))
                        zap_vma_pages(vma);
#endif
        }

        mmap_read_unlock(mm);
        return 0;
}
#endif

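/*
 * Lazily populate the vvar pages on first access.  If the task belongs to
 * a time namespace, the namespace page and the real vDSO data page trade
 * places (see the comment in the VVAR_TIMENS_PAGE_OFFSET case below).
 */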
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                             struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *timens_page = find_timens_vvar_page(vma);
        unsigned long pfn;

        switch (vmf->pgoff) {
        case VVAR_DATA_PAGE_OFFSET:
                if (timens_page)
                        pfn = page_to_pfn(timens_page);
                else
                        pfn = sym_to_pfn(vdso_data);
                break;
#ifdef CONFIG_TIME_NS
        case VVAR_TIMENS_PAGE_OFFSET:
                /*
                 * If a task belongs to a time namespace then a namespace
                 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
                 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
                 * offset.
                 * See also the comment near timens_setup_vdso_data().
                 */
                if (!timens_page)
                        return VM_FAULT_SIGBUS;
                pfn = sym_to_pfn(vdso_data);
                break;
#endif /* CONFIG_TIME_NS */
        default:
                return VM_FAULT_SIGBUS;
        }

        return vmf_insert_pfn(vma, vmf->address, pfn);
}

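/*
 * The special mappings below show up as "[vvar]" and "[vdso]" in
 * /proc/<pid>/maps.
 */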
static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
        [RV_VDSO_MAP_VVAR] = {
                .name   = "[vvar]",
                .fault = vvar_fault,
        },
        [RV_VDSO_MAP_VDSO] = {
                .name   = "[vdso]",
                .mremap = vdso_mremap,
        },
};

static struct __vdso_info vdso_info __ro_after_init = {
        .name = "vdso",
        .vdso_code_start = vdso_start,
        .vdso_code_end = vdso_end,
        .dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
        .cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
};

#ifdef CONFIG_COMPAT
static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
        [RV_VDSO_MAP_VVAR] = {
                .name   = "[vvar]",
                .fault = vvar_fault,
        },
        [RV_VDSO_MAP_VDSO] = {
                .name   = "[vdso]",
                .mremap = vdso_mremap,
        },
};

static struct __vdso_info compat_vdso_info __ro_after_init = {
        .name = "compat_vdso",
        .vdso_code_start = compat_vdso_start,
        .vdso_code_end = compat_vdso_end,
        .dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
        .cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
};
#endif

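/* Build the page lists for the native (and, if enabled, compat) vDSO at boot. */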
static int __init vdso_init(void)
{
        __vdso_init(&vdso_info);
#ifdef CONFIG_COMPAT
        __vdso_init(&compat_vdso_info);
#endif

        return 0;
}
arch_initcall(vdso_init);

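/*
 * Map the vvar region followed by the vDSO text into @mm:
 *
 *      vdso_base               [vvar] (VVAR_SIZE bytes, PFN-mapped, read-only)
 *      vdso_base + VVAR_SIZE   [vdso] (vdso_text_len bytes, read/exec)
 */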
static int __setup_additional_pages(struct mm_struct *mm,
                                    struct linux_binprm *bprm,
                                    int uses_interp,
                                    struct __vdso_info *vdso_info)
{
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        void *ret;

        BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

        vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + VVAR_SIZE;

        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }

        ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
                (VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
        if (IS_ERR(ret))
                goto up_fail;

        vdso_base += VVAR_SIZE;
        mm->context.vdso = (void *)vdso_base;

        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
                vdso_info->cm);
        if (IS_ERR(ret))
                goto up_fail;

        return 0;

up_fail:
        mm->context.vdso = NULL;
        return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = __setup_additional_pages(mm, bprm, uses_interp,
                                       &compat_vdso_info);
        mmap_write_unlock(mm);

        return ret;
}
#endif

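/* Called from the ELF loader at exec time to map the vDSO for a new process. */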
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info);
        mmap_write_unlock(mm);

        return ret;
}
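
/*
 * For reference, a minimal sketch (not part of this file) of how userspace
 * finds the mapping installed above: the kernel advertises the vDSO base
 * through the AT_SYSINFO_EHDR auxiliary-vector entry, e.g.
 *
 *      #include <sys/auxv.h>
 *
 *      void *vdso_base = (void *)getauxval(AT_SYSINFO_EHDR);
 *
 * A resolver (typically the C library) then parses the ELF headers at that
 * address to look up symbols such as __vdso_gettimeofday.
 */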