// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>

extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

static union vdso_data_store vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

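/*
 * Page offsets within the vvar mapping. VVAR_NR_PAGES must match
 * __VVAR_PAGES from the vdso datapage header; map_vdso() below enforces
 * this with a BUILD_BUG_ON().
 */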
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		if (!vma_is_special_mapping(vma, &vvar_mapping))
			continue;
		zap_vma_pages(vma);
		break;
	}
	mmap_read_unlock(mm);
	return 0;
}
#endif

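/*
 * Fault handler for the vvar VMA: translate the faulting page offset
 * into the physical page that backs it and insert it as a PFN mapping.
 * The VMA is created with VM_PFNMAP, so no struct page refcounting
 * happens here.
 */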
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Fault in VVAR page too, since it will be accessed
			 * to get clock data anyway.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

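/*
 * mremap() callback: keep the cached vdso base in sync when userspace
 * moves the vdso mapping.
 */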
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

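/*
 * The vvar mapping is populated lazily through vvar_fault(); the vdso
 * text mappings are backed by the page lists set up in vdso_init().
 */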
static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

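/*
 * Make the CPU number available to the vdso getcpu() implementation:
 * it is stored in the TOD programmable field, which userspace can read
 * without a system call (presumably via STORE CLOCK EXTENDED; see the
 * vdso getcpu code).
 */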
int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

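/*
 * Map the vvar pages followed directly by the vdso text into the
 * current process. context.vdso_base records the start of the text,
 * which (per ARCH_DLINFO in the s390 asm/elf.h) is what the ELF loader
 * reports to userspace as AT_SYSINFO_EHDR.
 */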
static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		do_munmap(mm, vvar_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}

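/*
 * Pick a random, page-aligned base address for the whole vdso mapping
 * from [start, end - len], with end capped at VDSO_BASE. Each of the
 * ((end - start) >> PAGE_SHIFT) + 1 possible page offsets is equally
 * likely.
 */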
static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

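/* Size of the full mapping: vvar pages plus the vdso text, page aligned. */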
unsigned long vdso_size(void)
{
	unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;

	if (is_compat_task())
		size += vdso32_end - vdso32_start;
	else
		size += vdso64_end - vdso64_start;
	return PAGE_ALIGN(size);
}

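/*
 * Called from the ELF loader at exec time: choose a (possibly
 * randomized) address above the stack and establish the vdso mapping
 * for the new process image.
 */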
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}

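/*
 * Build the NULL-terminated page array for a vdso image, suitable for
 * the .pages member of a struct vm_special_mapping (hence the pages + 1
 * entries from kcalloc()).
 */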
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

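/* Set up the page arrays for the 64-bit and, if enabled, compat vdso. */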
static int __init vdso_init(void)
{
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);