// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */
#include <asm/unistd.h>
#include <as-layout.h>
#include <kern_util.h>
#include <ptrace_user.h>
#include <registers.h>
#include <sysdep/stub.h>
#include <linux/threads.h>
#include <timetravel.h>
#include "../internal.h"
int is_skas_winch(int pid, int fd, void *data)
{
	return pid == getpgrp();
}
static const char *ptrace_reg_name(int idx)
{
#define R(n) case HOST_##n: return #n
#elif defined(__i386__)
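/*
 * Dump the stub's registers with their symbolic names; used for error
 * reporting when the stub stops in an unexpected way (see wait_stub_done()).
 */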
static int ptrace_dump_regs(int pid)
{
	unsigned long regs[MAX_REG_NR];
	int i;

	if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
		return -errno;
	printk(UM_KERN_ERR "Stub registers -\n");
	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		const char *regname = ptrace_reg_name(i);
		printk(UM_KERN_ERR "\t%s\t(%2d): %lx\n", regname, i, regs[i]);
	}
	return 0;
}
/*
 * Signals that are OK to receive in the stub - we'll just continue it.
 * SIGWINCH will happen when UML is inside a detached screen.
 */
#define STUB_SIG_MASK ((1 << SIGALRM) | (1 << SIGWINCH))

/* Signals that the stub will finish with - anything else is an error */
#define STUB_DONE_MASK (1 << SIGTRAP)
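/*
 * Keep the child running across the harmless stops in STUB_SIG_MASK until it
 * stops with a signal in STUB_DONE_MASK (SIGTRAP); any other outcome dumps
 * the stub registers and is treated as an error.
 */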
void wait_stub_done(int pid)
	CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
	if ((n < 0) || !WIFSTOPPED(status))

	if (((1 << WSTOPSIG(status)) & STUB_SIG_MASK) == 0)

	err = ptrace(PTRACE_CONT, pid, 0, 0);
		printk(UM_KERN_ERR "%s : continue failed, errno = %d\n",

	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)

	err = ptrace_dump_regs(pid);
	printk(UM_KERN_ERR "Failed to get registers from stub, errno = %d\n",
	printk(UM_KERN_ERR "%s : failed to wait for SIGTRAP, pid = %d, n = %d, errno = %d, status = 0x%x\n",
	       __func__, pid, n, errno, status);
extern unsigned long current_stub_stack(void);
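/*
 * The stub's own SIGSEGV handler records the fault information at the start
 * of the stub stack page. Re-deliver the SIGSEGV via PTRACE_CONT so that
 * handler runs, then copy the result out of the stub stack.
 */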
static void get_skas_faultinfo(int pid, struct faultinfo *fi)
	err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
		printk(UM_KERN_ERR "Failed to continue stub, pid = %d, errno = %d\n",
		       pid, errno);

	/*
	 * faultinfo is prepared by the stub_segv_handler at the start of
	 * the stub stack page. We just have to copy it.
	 */
	memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
static void handle_segv(int pid, struct uml_pt_regs *regs)
{
	get_skas_faultinfo(pid, &regs->faultinfo);
	segv(regs->faultinfo, 0, 1, NULL);
}
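/*
 * A trap outside the stub area is the child entering a system call (it runs
 * under PTRACE_SYSEMU, see userspace() below) and is handed to the UML
 * syscall path.
 */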
static void handle_trap(int pid, struct uml_pt_regs *regs)
{
	if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
		fatal_sigsegv();

	handle_syscall(regs);
}
extern char __syscall_stub_start[];

static int stub_exe_fd;
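/* CLOSE_RANGE_CLOEXEC may be missing from older libc headers; the value matches the kernel UAPI. */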
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC (1U << 2)
#endif
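/*
 * Child side of start_userspace(): runs on the temporary stack page after
 * clone(), describes the stub code/data mappings in init_data, feeds that
 * structure through a pipe that becomes the new process's stdin, and then
 * execs the stub binary.
 */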
static int userspace_tramp(void *stack)
	char *const argv[] = { "uml-userspace", NULL };
	unsigned long long offset;
	struct stub_init_data init_data = {
		.stub_start = STUB_START,
		.segv_handler = STUB_CODE +
				(unsigned long) stub_segv_handler -
				(unsigned long) __syscall_stub_start,
	};
	struct iomem_region *iomem;

	init_data.stub_code_fd = phys_mapping(uml_to_phys(__syscall_stub_start),
					      &offset);
	init_data.stub_code_offset = MMAP_OFFSET(offset);

	init_data.stub_data_fd = phys_mapping(uml_to_phys(stack), &offset);
	init_data.stub_data_offset = MMAP_OFFSET(offset);
	/*
	 * Avoid leaking unneeded FDs to the stub by setting CLOEXEC on all FDs
	 * and then unsetting it on all memory-related FDs.
	 * This is not strictly necessary from a safety perspective.
	 */
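	/*
	 * close_range() with CLOSE_RANGE_CLOEXEC only flags the FDs as
	 * close-on-exec; nothing is closed until the execveat() below.
	 */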
	syscall(__NR_close_range, 0, ~0U, CLOSE_RANGE_CLOEXEC);

	fcntl(init_data.stub_data_fd, F_SETFD, 0);
	for (iomem = iomem_regions; iomem; iomem = iomem->next)
		fcntl(iomem->fd, F_SETFD, 0);

	/* Create a pipe for init_data (no CLOEXEC) and dup2 to STDIN */

	if (dup2(pipe_fds[0], 0) < 0)

	/* Write init_data and close write side */
	ret = write(pipe_fds[1], &init_data, sizeof(init_data));

	if (ret != sizeof(init_data))

	/* Raw execveat for compatibility with older libc versions */
	syscall(__NR_execveat, stub_exe_fd, (unsigned long)"",
		(unsigned long)argv, NULL, AT_EMPTY_PATH);
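	/*
	 * execveat() does not return on success; reaching this point means
	 * the exec of the stub binary failed.
	 */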
extern char stub_exe_start[];
extern char stub_exe_end[];

extern char *tempdir;

#define STUB_EXE_NAME_TEMPLATE "/uml-userspace-XXXXXX"
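/* MFD_EXEC may be missing from older libc headers; the value matches the kernel UAPI. */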
#ifndef MFD_EXEC
#define MFD_EXEC 0x0010U
#endif
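/*
 * Write the embedded stub binary (stub_exe_start..stub_exe_end) into an
 * executable, sealed memfd, falling back to a temporary file under tempdir
 * when memfd_create() is not available.
 */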
static int __init init_stub_exe_fd(void)
	char *tmpfile = NULL;

	stub_exe_fd = memfd_create("uml-userspace",
				   MFD_EXEC | MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (stub_exe_fd < 0) {
		printk(UM_KERN_INFO "Could not create executable memfd, using temporary file!");

		tmpfile = malloc(strlen(tempdir) +
				 strlen(STUB_EXE_NAME_TEMPLATE) + 1);
			panic("Failed to allocate memory for stub binary name");

		strcpy(tmpfile, tempdir);
		strcat(tmpfile, STUB_EXE_NAME_TEMPLATE);

		stub_exe_fd = mkstemp(tmpfile);
			panic("Could not create temporary file for stub binary: %d",

	while (written < stub_exe_end - stub_exe_start) {
		ssize_t res = write(stub_exe_fd, stub_exe_start + written,
				    stub_exe_end - stub_exe_start - written);

			panic("Failed to write stub binary: %d", -errno);

	fcntl(stub_exe_fd, F_ADD_SEALS,
	      F_SEAL_WRITE | F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL);

	if (fchmod(stub_exe_fd, 00500) < 0) {
		panic("Could not make stub binary executable: %d",

	stub_exe_fd = open(tmpfile, O_RDONLY | O_CLOEXEC | O_NOFOLLOW);
	if (stub_exe_fd < 0) {
		panic("Could not reopen stub binary: %d", -errno);

__initcall(init_stub_exe_fd);
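/* Host pid of the ptraced userspace process; only userspace_pid[0] is used here. */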
int userspace_pid[NR_CPUS];

/**
 * start_userspace() - prepare a new userspace process
 * @stub_stack:	pointer to the stub stack.
 *
 * Sets up a new temporary stack page that is used while userspace_tramp() runs.
 * Clones the kernel process into a new userspace process, with FDs only.
 *
 * Return: When positive: the process id of the new userspace process,
 *	   when negative: an error number.
 * FIXME: can PIDs become negative?!
 */
int start_userspace(unsigned long stub_stack)
	int pid, status, n, err;

	/* setup a temporary stack page */
	stack = mmap(NULL, UM_KERN_PAGE_SIZE,
		     PROT_READ | PROT_WRITE | PROT_EXEC,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (stack == MAP_FAILED) {
		printk(UM_KERN_ERR "%s : mmap failed, errno = %d\n",

	/* set stack pointer to the end of the stack page, so it can grow downwards */
	sp = (unsigned long)stack + UM_KERN_PAGE_SIZE;
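	/*
	 * CLONE_VM lets the child keep using this temporary stack in our
	 * address space, and CLONE_VFORK suspends us until the child has
	 * done its execveat(), after which the page can be unmapped below.
	 */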
	/* clone into new userspace process */
	pid = clone(userspace_tramp, (void *) sp,
		    CLONE_VFORK | CLONE_VM | SIGCHLD,
		    (void *)stub_stack);
		printk(UM_KERN_ERR "%s : clone failed, errno = %d\n",

	do {
		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
			printk(UM_KERN_ERR "%s : wait failed, errno = %d\n",
	} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGALRM));

	if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
		printk(UM_KERN_ERR "%s : expected SIGSTOP, got status = %d\n",

	if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
		   (void *) PTRACE_O_TRACESYSGOOD) < 0) {
		printk(UM_KERN_ERR "%s : PTRACE_SETOPTIONS failed, errno = %d\n",

	if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
		printk(UM_KERN_ERR "%s : munmap failed, errno = %d\n",

	os_kill_ptraced_process(pid, 1);
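/*
 * Number of userspace iterations since this thread last gave the scheduler a
 * chance to run; used by the time-travel accounting in userspace().
 */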
int unscheduled_userspace_iterations;
extern unsigned long tt_extra_sched_jiffies;
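/*
 * userspace() drives the ptraced child process. Roughly, each iteration of
 * its main loop does:
 *
 *	syscall_stub_flush(current_mm_id());	flush queued stub syscalls
 *	ptrace(PTRACE_SETREGS, ...);		install the guest register state
 *	ptrace(op, pid, 0, 0);			let the child run (PTRACE_SYSEMU*)
 *	waitpid(pid, &status, ...);		wait for the next stop
 *	ptrace(PTRACE_GETREGS, ...);		read the register state back
 *	...					dispatch SIGSEGV/SIGTRAP/other signals
 */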
void userspace(struct uml_pt_regs *regs)
	int err, status, op, pid = userspace_pid[0];
	/* Handle any immediate reschedules or signals */

		/*
		 * When we are in time-travel mode, userspace can theoretically
		 * do a *lot* of work without being scheduled. The problem with
		 * this is that it will prevent kernel bookkeeping (primarily
		 * the RCU) from running, which can for example cause OOM
		 * situations.
		 *
		 * To prevent that, this code accounts one jiffy against the
		 * scheduling clock after the configured number of userspace
		 * iterations in the same thread.
		 */
		if (time_travel_mode == TT_MODE_INFCPU ||
		    time_travel_mode == TT_MODE_EXTERNAL) {
#ifdef CONFIG_UML_MAX_USERSPACE_ITERATIONS
			if (CONFIG_UML_MAX_USERSPACE_ITERATIONS &&
			    unscheduled_userspace_iterations++ >
			    CONFIG_UML_MAX_USERSPACE_ITERATIONS) {
				tt_extra_sched_jiffies += 1;
				unscheduled_userspace_iterations = 0;
			}
#endif
		}

		time_travel_print_bc_msg();
		/* Flush out any pending syscalls */
		err = syscall_stub_flush(current_mm_id());
			printk(UM_KERN_ERR "%s - Error flushing stub syscalls: %d",

		/*
		 * This can legitimately fail if the process loads a
		 * bogus value into a segment register. It will
		 * segfault and PTRACE_GETREGS will read that value
		 * out of the process. However, PTRACE_SETREGS will
		 * fail. In this case, there is nothing to do but
		 * just kill the process.
		 */
		if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "%s - ptrace set regs failed, errno = %d\n",

		if (put_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "%s - ptrace set fp regs failed, errno = %d\n",

		if (singlestepping())
			op = PTRACE_SYSEMU_SINGLESTEP;

		if (ptrace(op, pid, 0, 0)) {
			printk(UM_KERN_ERR "%s - ptrace continue failed, op = %d, errno = %d\n",
			       __func__, op, errno);
		CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
			printk(UM_KERN_ERR "%s - wait failed, errno = %d\n",

		if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
			printk(UM_KERN_ERR "%s - PTRACE_GETREGS failed, errno = %d\n",

		if (get_fp_registers(pid, regs->fp)) {
			printk(UM_KERN_ERR "%s - get_fp_registers failed, errno = %d\n",

		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */
		if (WIFSTOPPED(status)) {
			int sig = WSTOPSIG(status);

			/* These signal handlers need the si argument.
			 * The SIGIO and SIGALRM handlers, which constitute
			 * the majority of invocations, do not use it.
			 */
				ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);

				if (PTRACE_FULL_FAULTINFO) {
					get_skas_faultinfo(pid,
							   &regs->faultinfo);
					(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
							     regs);
				}
				else handle_segv(pid, regs);

				handle_trap(pid, regs);

				relay_signal(SIGTRAP, (struct siginfo *)&si, regs);

				block_signals_trace();
				(*sig_info[sig])(sig, (struct siginfo *)&si, regs);
				unblock_signals_trace();

				printk(UM_KERN_ERR "%s - child stopped with signal %d\n",

			pid = userspace_pid[0];
			/* Avoid -ERESTARTSYS handling in host */
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
				PT_SYSCALL_NR(regs->gp) = -1;
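/*
 * new_thread() only seeds the jmp_buf; the first longjmp into this buffer
 * starts handler() on the given stack.
 */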
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
{
	(*buf)[0].JB_IP = (unsigned long) handler;
	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
		sizeof(void *);
}
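/*
 * Reasons passed through longjmp(&initial_jmpbuf, ...) back into
 * start_idle_thread().
 */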
#define INIT_JMP_NEW_THREAD 0
#define INIT_JMP_CALLBACK 1
#define INIT_JMP_HALT 2
#define INIT_JMP_REBOOT 3
void switch_threads(jmp_buf *me, jmp_buf *you)
	unscheduled_userspace_iterations = 0;

	if (UML_SETJMP(me) == 0)
static jmp_buf initial_jmpbuf;

/* XXX Make these percpu */
static void (*cb_proc)(void *arg);
static jmp_buf *cb_back;
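/*
 * start_idle_thread() runs on the initial host stack and services the
 * INIT_JMP_* re-entries: launching the first UML thread, running a callback
 * on this stack for initial_thread_cb_skas(), halting and rebooting.
 */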
int start_idle_thread(void *stack, jmp_buf *switch_buf)
	set_handler(SIGWINCH);

	/*
	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
	 * and restore signals, with the possible side-effect of
	 * trying to handle any signals which came when they were
	 * blocked, which can't be done on this stack.
	 * Signals must be blocked when jumping back here and restored
	 * after returning to the jumper.
	 */
	n = setjmp(initial_jmpbuf);
	switch (n) {
	case INIT_JMP_NEW_THREAD:
		(*switch_buf)[0].JB_IP = (unsigned long) uml_finishsetup;
		(*switch_buf)[0].JB_SP = (unsigned long) stack +
			UM_THREAD_SIZE - sizeof(void *);
	case INIT_JMP_CALLBACK:
		longjmp(*cb_back, 1);
	case INIT_JMP_REBOOT:

	default:
		printk(UM_KERN_ERR "Bad sigsetjmp return in %s - %d\n",

	longjmp(*switch_buf, 1);

	printk(UM_KERN_ERR "impossible long jump!");
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
	block_signals_trace();
	if (UML_SETJMP(&here) == 0)
		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
	unblock_signals_trace();

void halt_skas(void)
{
	block_signals_trace();
	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
}
static bool noreboot;

static int __init noreboot_cmd_param(char *str, int *add)

__uml_setup("noreboot", noreboot_cmd_param,
"    Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
"    This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
void reboot_skas(void)
{
	block_signals_trace();
	UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
}
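/*
 * Record which host process backs the address space that is about to run;
 * userspace() picks the new pid up from userspace_pid[0].
 */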
void __switch_mm(struct mm_id *mm_idp)
{
	userspace_pid[0] = mm_idp->pid;
}