1 /* SPDX-License-Identifier: GPL-2.0 */
9 #include <sys/syscall.h>
15 #include <sys/ucontext.h>
24 #include "../kselftest.h"
27 #define TOTAL_TESTS 13
40 # define SYS_getcpu 309
42 # define SYS_getcpu 318
46 /* max length of lines in /proc/self/maps - anything longer is skipped here */
47 #define MAPS_LINE_LEN 128
49 /* vsyscalls and vDSO */
50 bool vsyscall_map_r = false, vsyscall_map_x = false;
52 typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
53 const gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000);
56 typedef int (*vgettime_t)(clockid_t, struct timespec *);
57 vgettime_t vdso_gettime;
59 typedef long (*time_func_t)(time_t *t);
60 const time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400);
61 time_func_t vdso_time;
63 typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
64 const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
67 static void init_vdso(void)
69 void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
71 vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
73 ksft_print_msg("[WARN] failed to find vDSO\n");
77 vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday");
79 ksft_print_msg("[WARN] failed to find gettimeofday in vDSO\n");
81 vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
83 ksft_print_msg("[WARN] failed to find clock_gettime in vDSO\n");
85 vdso_time = (time_func_t)dlsym(vdso, "__vdso_time");
87 ksft_print_msg("[WARN] failed to find time in vDSO\n");
89 vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
91 ksft_print_msg("[WARN] failed to find getcpu in vDSO\n");
/* Raw gettimeofday syscall: bypasses both the vDSO and the vsyscall page. */
static inline long sys_gtod(struct timeval *tv, struct timezone *tz)
{
	return syscall(SYS_gettimeofday, tv, tz);
}
/* Raw time syscall: returns the time and (if t != NULL) stores it in *t. */
static inline long sys_time(time_t *t)
{
	return syscall(SYS_time, t);
}
/* Raw getcpu syscall; cache (the third kernel argument) is unused/legacy. */
static inline long sys_getcpu(unsigned *cpu, unsigned *node,
			      void *cache)
{
	return syscall(SYS_getcpu, cpu, node, cache);
}
/* Return a - b in seconds as a double (negative if a is earlier than b). */
static double tv_diff(const struct timeval *a, const struct timeval *b)
{
	return (double)(a->tv_sec - b->tv_sec) +
		(double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6;
}
117 static void check_gtod(const struct timeval *tv_sys1,
118 const struct timeval *tv_sys2,
119 const struct timezone *tz_sys,
121 const struct timeval *tv_other,
122 const struct timezone *tz_other)
126 if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest ||
127 tz_sys->tz_dsttime != tz_other->tz_dsttime))
128 ksft_print_msg("%s tz mismatch\n", which);
130 d1 = tv_diff(tv_other, tv_sys1);
131 d2 = tv_diff(tv_sys2, tv_other);
133 ksft_print_msg("%s time offsets: %lf %lf\n", which, d1, d2);
135 ksft_test_result(!(d1 < 0 || d2 < 0), "%s gettimeofday()'s timeval\n", which);
138 static void test_gtod(void)
140 struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys;
141 struct timezone tz_sys, tz_vdso, tz_vsys;
145 ksft_print_msg("test gettimeofday()\n");
147 if (sys_gtod(&tv_sys1, &tz_sys) != 0)
148 ksft_exit_fail_msg("syscall gettimeofday: %s\n", strerror(errno));
150 ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso);
152 ret_vsys = vgtod(&tv_vsys, &tz_vsys);
153 if (sys_gtod(&tv_sys2, &tz_sys) != 0)
154 ksft_exit_fail_msg("syscall gettimeofday: %s\n", strerror(errno));
158 check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso);
160 ksft_test_result_fail("vDSO gettimeofday() failed: %ld\n", ret_vdso);
162 ksft_test_result_skip("vdso_gtod isn't set\n");
165 if (vsyscall_map_x) {
167 check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys);
169 ksft_test_result_fail("vsys gettimeofday() failed: %ld\n", ret_vsys);
171 ksft_test_result_skip("vsyscall_map_x isn't set\n");
175 static void test_time(void)
177 long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0;
178 long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1;
180 ksft_print_msg("test time()\n");
181 t_sys1 = sys_time(&t2_sys1);
183 t_vdso = vdso_time(&t2_vdso);
185 t_vsys = vtime(&t2_vsys);
186 t_sys2 = sys_time(&t2_sys2);
187 if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) {
188 ksft_print_msg("syscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n",
189 t_sys1, t2_sys1, t_sys2, t2_sys2);
190 ksft_test_result_skip("vdso_time\n");
191 ksft_test_result_skip("vdso_time\n");
196 if (t_vdso < 0 || t_vdso != t2_vdso)
197 ksft_test_result_fail("vDSO failed (ret:%ld output:%ld)\n",
199 else if (t_vdso < t_sys1 || t_vdso > t_sys2)
200 ksft_test_result_fail("vDSO returned the wrong time (%ld %ld %ld)\n",
201 t_sys1, t_vdso, t_sys2);
203 ksft_test_result_pass("vDSO time() is okay\n");
205 ksft_test_result_skip("vdso_time isn't set\n");
208 if (vsyscall_map_x) {
209 if (t_vsys < 0 || t_vsys != t2_vsys)
210 ksft_test_result_fail("vsyscall failed (ret:%ld output:%ld)\n",
212 else if (t_vsys < t_sys1 || t_vsys > t_sys2)
213 ksft_test_result_fail("vsyscall returned the wrong time (%ld %ld %ld)\n",
214 t_sys1, t_vsys, t_sys2);
216 ksft_test_result_pass("vsyscall time() is okay\n");
218 ksft_test_result_skip("vsyscall_map_x isn't set\n");
222 static void test_getcpu(int cpu)
224 unsigned int cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys;
225 long ret_sys, ret_vdso = -1, ret_vsys = -1;
226 unsigned int node = 0;
227 bool have_node = false;
230 ksft_print_msg("getcpu() on CPU %d\n", cpu);
233 CPU_SET(cpu, &cpuset);
234 if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
235 ksft_print_msg("failed to force CPU %d\n", cpu);
236 ksft_test_result_skip("vdso_getcpu\n");
237 ksft_test_result_skip("vsyscall_map_x\n");
242 ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
244 ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
246 ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
250 ksft_print_msg("syscall reported CPU %u but should be %d\n",
259 ksft_test_result_fail("vDSO getcpu() failed\n");
266 if (cpu_vdso != cpu || node_vdso != node) {
268 ksft_print_msg("vDSO reported CPU %u but should be %d\n",
270 if (node_vdso != node)
271 ksft_print_msg("vDSO reported node %u but should be %u\n",
273 ksft_test_result_fail("Wrong values\n");
275 ksft_test_result_pass("vDSO reported correct CPU and node\n");
279 ksft_test_result_skip("vdso_getcpu isn't set\n");
282 if (vsyscall_map_x) {
284 ksft_test_result_fail("vsyscall getcpu() failed\n");
291 if (cpu_vsys != cpu || node_vsys != node) {
293 ksft_print_msg("vsyscall reported CPU %u but should be %d\n",
295 if (node_vsys != node)
296 ksft_print_msg("vsyscall reported node %u but should be %u\n",
298 ksft_test_result_fail("Wrong values\n");
300 ksft_test_result_pass("vsyscall reported correct CPU and node\n");
304 ksft_test_result_skip("vsyscall_map_x isn't set\n");
310 static jmp_buf jmpbuf;
311 static volatile unsigned long segv_err;
313 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
318 memset(&sa, 0, sizeof(sa));
319 sa.sa_sigaction = handler;
320 sa.sa_flags = SA_SIGINFO | flags;
321 sigemptyset(&sa.sa_mask);
322 if (sigaction(sig, &sa, 0))
323 ksft_exit_fail_msg("sigaction failed\n");
326 static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
328 ucontext_t *ctx = (ucontext_t *)ctx_void;
330 segv_err = ctx->uc_mcontext.gregs[REG_ERR];
331 siglongjmp(jmpbuf, 1);
334 static void test_vsys_r(void)
336 ksft_print_msg("Checking read access to the vsyscall page\n");
338 if (sigsetjmp(jmpbuf, 1) == 0) {
339 *(volatile int *)0xffffffffff600000;
345 if (can_read && !vsyscall_map_r)
346 ksft_test_result_fail("We have read access, but we shouldn't\n");
347 else if (!can_read && vsyscall_map_r)
348 ksft_test_result_fail("We don't have read access, but we should\n");
350 ksft_test_result_pass("We have read access\n");
352 ksft_test_result_pass("We do not have read access: #PF(0x%lx)\n", segv_err);
355 static void test_vsys_x(void)
357 if (vsyscall_map_x) {
358 /* We already tested this adequately. */
359 ksft_test_result_pass("vsyscall_map_x is true\n");
363 ksft_print_msg("Make sure that vsyscalls really page fault\n");
366 if (sigsetjmp(jmpbuf, 1) == 0) {
374 ksft_test_result_fail("Executing the vsyscall did not page fault\n");
375 else if (segv_err & (1 << 4)) /* INSTR */
376 ksft_test_result_pass("Executing the vsyscall page failed: #PF(0x%lx)\n",
379 ksft_test_result_fail("Execution failed with the wrong error: #PF(0x%lx)\n",
384 * Debuggers expect ptrace() to be able to peek at the vsyscall page.
385 * Use process_vm_readv() as a proxy for ptrace() to test this. We
386 * want it to work in the vsyscall=emulate case and to fail in the
387 * vsyscall=xonly case.
389 * It's worth noting that this ABI is a bit nutty. write(2) can't
390 * read from the vsyscall page on any kernel version or mode. The
391 * fact that ptrace() ever worked was a nice courtesy of old kernels,
392 * but the code to support it is fairly gross.
394 static void test_process_vm_readv(void)
397 struct iovec local, remote;
400 ksft_print_msg("process_vm_readv() from vsyscall page\n");
402 local.iov_base = buf;
403 local.iov_len = 4096;
404 remote.iov_base = (void *)0xffffffffff600000;
405 remote.iov_len = 4096;
406 ret = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
409 * We expect process_vm_readv() to work if and only if the
410 * vsyscall page is readable.
412 ksft_test_result(!vsyscall_map_r,
413 "process_vm_readv() failed (ret = %d, errno = %d)\n", ret, errno);
418 ksft_test_result(!memcmp(buf, remote.iov_base, sizeof(buf)), "Read data\n");
420 ksft_test_result_fail("process_rm_readv() succeeded, but it should have failed in this configuration\n");
423 static void init_vsys(void)
427 char line[MAPS_LINE_LEN];
430 maps = fopen("/proc/self/maps", "r");
432 ksft_test_result_skip("Could not open /proc/self/maps -- assuming vsyscall is r-x\n");
433 vsyscall_map_r = true;
437 while (fgets(line, MAPS_LINE_LEN, maps)) {
440 char name[MAPS_LINE_LEN];
442 /* sscanf() is safe here as strlen(name) >= strlen(line) */
443 if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s",
444 &start, &end, &r, &x, name) != 5)
447 if (strcmp(name, "[vsyscall]"))
450 ksft_print_msg("vsyscall map: %s", line);
452 if (start != (void *)0xffffffffff600000 ||
453 end != (void *)0xffffffffff601000) {
454 ksft_print_msg("address range is nonsense\n");
458 ksft_print_msg("vsyscall permissions are %c-%c\n", r, x);
459 vsyscall_map_r = (r == 'r');
460 vsyscall_map_x = (x == 'x');
469 ksft_print_msg("no vsyscall map in /proc/self/maps\n");
470 vsyscall_map_r = false;
471 vsyscall_map_x = false;
474 ksft_test_result(!nerrs, "vsyscall map\n");
/* Count of single-step traps whose RIP fell inside the vsyscall page. */
static volatile sig_atomic_t num_vsyscall_traps;

static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
{
	ucontext_t *ctx = (ucontext_t *)ctx_void;
	unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP];

	/* Mask off the low 12 bits: any address in the 4 KiB vsyscall page. */
	if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0)
		num_vsyscall_traps++;
}
488 static void test_emulation(void)
493 if (!vsyscall_map_x) {
494 ksft_test_result_skip("vsyscall_map_x isn't set\n");
498 ksft_print_msg("checking that vsyscalls are emulated\n");
499 sethandler(SIGTRAP, sigtrap, 0);
500 set_eflags(get_eflags() | X86_EFLAGS_TF);
502 set_eflags(get_eflags() & ~X86_EFLAGS_TF);
505 * If vsyscalls are emulated, we expect a single trap in the
506 * vsyscall page -- the call instruction will trap with RIP
507 * pointing to the entry point before emulation takes over.
508 * In native mode, we expect two traps, since whatever code
509 * the vsyscall page contains will be more than just a ret
512 is_native = (num_vsyscall_traps > 1);
514 ksft_test_result(!is_native, "vsyscalls are %s (%d instructions in vsyscall page)\n",
515 (is_native ? "native" : "emulated"), (int)num_vsyscall_traps);
519 int main(int argc, char **argv)
521 int total_tests = TOTAL_TESTS;
524 ksft_set_plan(total_tests);
537 sethandler(SIGSEGV, sigsegv, 0);
540 test_process_vm_readv();