2 * Emulation of Linux signals
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
25 #include "qemu-common.h"
26 #include "target_signal.h"
29 static struct target_sigaltstack target_sigaltstack_used = {
32 .ss_flags = TARGET_SS_DISABLE,
35 static struct target_sigaction sigact_table[TARGET_NSIG];
37 static void host_signal_handler(int host_signum, siginfo_t *info,
40 static uint8_t host_to_target_signal_table[_NSIG] = {
41 [SIGHUP] = TARGET_SIGHUP,
42 [SIGINT] = TARGET_SIGINT,
43 [SIGQUIT] = TARGET_SIGQUIT,
44 [SIGILL] = TARGET_SIGILL,
45 [SIGTRAP] = TARGET_SIGTRAP,
46 [SIGABRT] = TARGET_SIGABRT,
47 /* [SIGIOT] = TARGET_SIGIOT,*/
48 [SIGBUS] = TARGET_SIGBUS,
49 [SIGFPE] = TARGET_SIGFPE,
50 [SIGKILL] = TARGET_SIGKILL,
51 [SIGUSR1] = TARGET_SIGUSR1,
52 [SIGSEGV] = TARGET_SIGSEGV,
53 [SIGUSR2] = TARGET_SIGUSR2,
54 [SIGPIPE] = TARGET_SIGPIPE,
55 [SIGALRM] = TARGET_SIGALRM,
56 [SIGTERM] = TARGET_SIGTERM,
58 [SIGSTKFLT] = TARGET_SIGSTKFLT,
60 [SIGCHLD] = TARGET_SIGCHLD,
61 [SIGCONT] = TARGET_SIGCONT,
62 [SIGSTOP] = TARGET_SIGSTOP,
63 [SIGTSTP] = TARGET_SIGTSTP,
64 [SIGTTIN] = TARGET_SIGTTIN,
65 [SIGTTOU] = TARGET_SIGTTOU,
66 [SIGURG] = TARGET_SIGURG,
67 [SIGXCPU] = TARGET_SIGXCPU,
68 [SIGXFSZ] = TARGET_SIGXFSZ,
69 [SIGVTALRM] = TARGET_SIGVTALRM,
70 [SIGPROF] = TARGET_SIGPROF,
71 [SIGWINCH] = TARGET_SIGWINCH,
72 [SIGIO] = TARGET_SIGIO,
73 [SIGPWR] = TARGET_SIGPWR,
74 [SIGSYS] = TARGET_SIGSYS,
75 /* next signals stay the same */
76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
78 To fix this properly we need to do manual signal delivery multiplexed
79 over a single host signal. */
80 [__SIGRTMIN] = __SIGRTMAX,
81 [__SIGRTMAX] = __SIGRTMIN,
83 static uint8_t target_to_host_signal_table[_NSIG];
85 static inline int on_sig_stack(unsigned long sp)
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
91 static inline int sas_ss_flags(unsigned long sp)
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
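/* Note: this mirrors the kernel's sas_ss_flags(): SS_DISABLE when no alternate
   stack has been configured, SS_ONSTACK when sp currently lies inside it,
   and 0 otherwise. */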
97 int host_to_target_signal(int sig)
99 if (sig < 0 || sig >= _NSIG)
101 return host_to_target_signal_table[sig];
104 int target_to_host_signal(int sig)
106 if (sig < 0 || sig >= _NSIG)
108 return target_to_host_signal_table[sig];
111 static inline void target_sigemptyset(target_sigset_t *set)
113 memset(set, 0, sizeof(*set));
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
130 static void host_to_target_sigset_internal(target_sigset_t *d,
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
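/* Note the split above: the _internal helpers work on host-endian words, while
   the public host_to_target_sigset()/target_to_host_sigset() wrappers also
   byte-swap each word with tswapal() to match the guest's layout. */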
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
191 target_to_host_sigset(sigset, &d);
194 int block_signals(void)
196 TaskState *ts = (TaskState *)thread_cpu->opaque;
199 /* It's OK to block everything including SIGSEGV, because we won't
200 * run any further guest code before unblocking signals in
201 * process_pending_signals().
204 sigprocmask(SIG_SETMASK, &set, 0);
206 return atomic_xchg(&ts->signal_pending, 1);
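/* Note: atomic_xchg() returns the previous value of signal_pending, so a
   non-zero result tells the caller that a signal was already pending before we
   blocked; do_sigprocmask() below turns that into -TARGET_ERESTARTSYS. */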
209 /* Wrapper for sigprocmask function
210 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
211 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
212 * a signal was already pending and the syscall must be restarted, or
213 * 0 on success.
214 * If set is NULL, this is guaranteed not to fail.
216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
218 TaskState *ts = (TaskState *)thread_cpu->opaque;
221 *oldset = ts->signal_mask;
227 if (block_signals()) {
228 return -TARGET_ERESTARTSYS;
233 sigorset(&ts->signal_mask, &ts->signal_mask, set);
236 for (i = 1; i <= NSIG; ++i) {
237 if (sigismember(set, i)) {
238 sigdelset(&ts->signal_mask, i);
243 ts->signal_mask = *set;
246 g_assert_not_reached();
249 /* Silently ignore attempts to change blocking status of KILL or STOP */
250 sigdelset(&ts->signal_mask, SIGKILL);
251 sigdelset(&ts->signal_mask, SIGSTOP);
256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
257 !defined(TARGET_X86_64)
258 /* Just set the guest's signal mask to the specified value; the
259 * caller is assumed to have called block_signals() already.
261 static void set_sigmask(const sigset_t *set)
263 TaskState *ts = (TaskState *)thread_cpu->opaque;
265 ts->signal_mask = *set;
269 /* siginfo conversion */
271 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
272 const siginfo_t *info)
274 int sig = host_to_target_signal(info->si_signo);
275 int si_code = info->si_code;
277 tinfo->si_signo = sig;
279 tinfo->si_code = info->si_code;
281 /* This memset serves two purposes:
282 * (1) ensure we don't leak random junk to the guest later
283 * (2) placate false positives from gcc about fields
284 * being used uninitialized if it chooses to inline both this
285 * function and tswap_siginfo() into host_to_target_siginfo().
287 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
289 /* This is awkward, because we have to use a combination of
290 * the si_code and si_signo to figure out which of the union's
291 * members are valid. (Within the host kernel it is always possible
292 * to tell, but the kernel carefully avoids giving userspace the
293 * high 16 bits of si_code, so we don't have the information to
294 * do this the easy way...) We therefore make our best guess,
295 * bearing in mind that a guest can spoof most of the si_codes
296 * via rt_sigqueueinfo() if it likes.
298 * Once we have made our guess, we record it in the top 16 bits of
299 * the si_code, so that tswap_siginfo() later can use it.
300 * tswap_siginfo() will strip these top bits out before writing
301 * si_code to the guest (sign-extending the lower bits).
308 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
309 * These are the only unspoofable si_code values.
311 tinfo->_sifields._kill._pid = info->si_pid;
312 tinfo->_sifields._kill._uid = info->si_uid;
313 si_type = QEMU_SI_KILL;
316 /* Everything else is spoofable. Make best guess based on signal */
319 tinfo->_sifields._sigchld._pid = info->si_pid;
320 tinfo->_sifields._sigchld._uid = info->si_uid;
321 tinfo->_sifields._sigchld._status
322 = host_to_target_waitstatus(info->si_status);
323 tinfo->_sifields._sigchld._utime = info->si_utime;
324 tinfo->_sifields._sigchld._stime = info->si_stime;
325 si_type = QEMU_SI_CHLD;
328 tinfo->_sifields._sigpoll._band = info->si_band;
329 tinfo->_sifields._sigpoll._fd = info->si_fd;
330 si_type = QEMU_SI_POLL;
333 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
334 tinfo->_sifields._rt._pid = info->si_pid;
335 tinfo->_sifields._rt._uid = info->si_uid;
336 /* XXX: potential problem if 64 bit */
337 tinfo->_sifields._rt._sigval.sival_ptr
338 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
339 si_type = QEMU_SI_RT;
345 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
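/* Illustrative example (not part of the original code): a kill() reported with
   si_code SI_USER (0) ends up as deposit32(0, 16, 16, QEMU_SI_KILL), i.e.
   QEMU_SI_KILL in bits 16..31 and the real si_code in bits 0..15;
   tswap_siginfo() later separates them again with extract32()/sextract32(). */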
348 static void tswap_siginfo(target_siginfo_t *tinfo,
349 const target_siginfo_t *info)
351 int si_type = extract32(info->si_code, 16, 16);
352 int si_code = sextract32(info->si_code, 0, 16);
354 __put_user(info->si_signo, &tinfo->si_signo);
355 __put_user(info->si_errno, &tinfo->si_errno);
356 __put_user(si_code, &tinfo->si_code);
358 /* We can use our internal marker of which fields in the structure
359 * are valid, rather than duplicating the guesswork of
360 * host_to_target_siginfo_noswap() here.
364 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
365 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
368 __put_user(info->_sifields._timer._timer1,
369 &tinfo->_sifields._timer._timer1);
370 __put_user(info->_sifields._timer._timer2,
371 &tinfo->_sifields._timer._timer2);
374 __put_user(info->_sifields._sigpoll._band,
375 &tinfo->_sifields._sigpoll._band);
376 __put_user(info->_sifields._sigpoll._fd,
377 &tinfo->_sifields._sigpoll._fd);
380 __put_user(info->_sifields._sigfault._addr,
381 &tinfo->_sifields._sigfault._addr);
384 __put_user(info->_sifields._sigchld._pid,
385 &tinfo->_sifields._sigchld._pid);
386 __put_user(info->_sifields._sigchld._uid,
387 &tinfo->_sifields._sigchld._uid);
388 __put_user(info->_sifields._sigchld._status,
389 &tinfo->_sifields._sigchld._status);
390 __put_user(info->_sifields._sigchld._utime,
391 &tinfo->_sifields._sigchld._utime);
392 __put_user(info->_sifields._sigchld._stime,
393 &tinfo->_sifields._sigchld._stime);
396 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
397 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
398 __put_user(info->_sifields._rt._sigval.sival_ptr,
399 &tinfo->_sifields._rt._sigval.sival_ptr);
402 g_assert_not_reached();
406 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
408 target_siginfo_t tgt_tmp;
409 host_to_target_siginfo_noswap(&tgt_tmp, info);
410 tswap_siginfo(tinfo, &tgt_tmp);
413 /* XXX: we assume that only POSIX RT signals are used. */
414 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
415 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
417 /* This conversion is used only for the rt_sigqueueinfo syscall,
418 * and so we know that the _rt fields are the valid ones.
422 __get_user(info->si_signo, &tinfo->si_signo);
423 __get_user(info->si_errno, &tinfo->si_errno);
424 __get_user(info->si_code, &tinfo->si_code);
425 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
426 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
427 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
428 info->si_value.sival_ptr = (void *)(long)sival_ptr;
431 static int fatal_signal (int sig)
436 case TARGET_SIGWINCH:
437 /* Ignored by default. */
444 /* Job control signals. */
451 /* Returns 1 if the given signal should dump core when not handled. */
452 static int core_dump_signal(int sig)
468 void signal_init(void)
470 TaskState *ts = (TaskState *)thread_cpu->opaque;
471 struct sigaction act;
472 struct sigaction oact;
476 /* generate signal conversion tables */
477 for(i = 1; i < _NSIG; i++) {
478 if (host_to_target_signal_table[i] == 0)
479 host_to_target_signal_table[i] = i;
481 for(i = 1; i < _NSIG; i++) {
482 j = host_to_target_signal_table[i];
483 target_to_host_signal_table[j] = i;
486 /* Set the signal mask from the host mask. */
487 sigprocmask(0, 0, &ts->signal_mask);
489 /* set all host signal handlers. ALL signals are blocked during
490 the handlers to serialize them. */
491 memset(sigact_table, 0, sizeof(sigact_table));
493 sigfillset(&act.sa_mask);
494 act.sa_flags = SA_SIGINFO;
495 act.sa_sigaction = host_signal_handler;
496 for(i = 1; i <= TARGET_NSIG; i++) {
497 host_sig = target_to_host_signal(i);
498 sigaction(host_sig, NULL, &oact);
499 if (oact.sa_sigaction == (void *)SIG_IGN) {
500 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
501 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
502 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
504 /* If there's already a handler installed then something has
505 gone horribly wrong, so don't even try to handle that case. */
506 /* Install some handlers for our own use. We need at least
507 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
508 trap all signals because it affects syscall interrupt
509 behavior. But do trap all default-fatal signals. */
510 if (fatal_signal (i))
511 sigaction(host_sig, &act, NULL);
516 /* abort execution with signal */
517 static void QEMU_NORETURN force_sig(int target_sig)
519 CPUState *cpu = thread_cpu;
520 CPUArchState *env = cpu->env_ptr;
521 TaskState *ts = (TaskState *)cpu->opaque;
522 int host_sig, core_dumped = 0;
523 struct sigaction act;
525 host_sig = target_to_host_signal(target_sig);
526 trace_user_force_sig(env, target_sig, host_sig);
527 gdb_signalled(env, target_sig);
529 /* dump core if supported by target binary format */
530 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
533 ((*ts->bprm->core_dump)(target_sig, env) == 0);
536 /* we already dumped the core of the target process; we don't want
537 * a coredump of qemu itself */
538 struct rlimit nodump;
539 getrlimit(RLIMIT_CORE, &nodump);
541 setrlimit(RLIMIT_CORE, &nodump);
542 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
543 target_sig, strsignal(host_sig), "core dumped" );
546 /* The proper exit code for dying from an uncaught signal is
547 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
548 * a negative value. To get the proper exit code we need to
549 * actually die from an uncaught signal. Here the default signal
550 * handler is installed, we send ourselves a signal and we wait for
551 * it to arrive. */
552 sigfillset(&act.sa_mask);
553 act.sa_handler = SIG_DFL;
555 sigaction(host_sig, &act, NULL);
557 /* For some reason raise(host_sig) doesn't send the signal when
558 * statically linked on x86-64. */
559 kill(getpid(), host_sig);
561 /* Make sure the signal isn't masked (just reuse the mask inside of act). */
563 sigdelset(&act.sa_mask, host_sig);
564 sigsuspend(&act.sa_mask);
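/* sigsuspend() is not expected to return: the signal we just sent ourselves is
   now handled by SIG_DFL and terminates the process with the proper status. */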
570 /* Queue a signal so that it will be sent to the virtual CPU as soon as possible. */
572 int queue_signal(CPUArchState *env, int sig, int si_type,
573 target_siginfo_t *info)
575 CPUState *cpu = ENV_GET_CPU(env);
576 TaskState *ts = cpu->opaque;
578 trace_user_queue_signal(env, sig);
580 info->si_code = deposit32(info->si_code, 16, 16, si_type);
582 ts->sync_signal.info = *info;
583 ts->sync_signal.pending = sig;
584 /* signal that a new signal is pending */
585 atomic_set(&ts->signal_pending, 1);
586 return 1; /* indicates that the signal was queued */
589 #ifndef HAVE_SAFE_SYSCALL
590 static inline void rewind_if_in_safe_syscall(void *puc)
592 /* Default version: never rewind */
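/* Hosts that define HAVE_SAFE_SYSCALL are assumed to provide a real version
   which, if the signal landed inside a safe_syscall() sequence, rewinds the
   host PC so the interrupted syscall is restarted rather than raced. */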
596 static void host_signal_handler(int host_signum, siginfo_t *info,
599 CPUArchState *env = thread_cpu->env_ptr;
600 CPUState *cpu = ENV_GET_CPU(env);
601 TaskState *ts = cpu->opaque;
604 target_siginfo_t tinfo;
605 ucontext_t *uc = puc;
606 struct emulated_sigtable *k;
608 /* The CPU emulator uses some host signals to detect exceptions;
609 we forward those signals to it. */
610 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
611 && info->si_code > 0) {
612 if (cpu_signal_handler(host_signum, info, puc))
616 /* get target signal number */
617 sig = host_to_target_signal(host_signum);
618 if (sig < 1 || sig > TARGET_NSIG)
620 trace_user_host_signal(env, host_signum, sig);
622 rewind_if_in_safe_syscall(puc);
624 host_to_target_siginfo_noswap(&tinfo, info);
625 k = &ts->sigtab[sig - 1];
628 ts->signal_pending = 1;
630 /* Block host signals until target signal handler entered. We
631 * can't block SIGSEGV or SIGBUS while we're executing guest
632 * code in case the guest code provokes one in the window between
633 * now and it getting out to the main loop. Signals will be
634 * unblocked again in process_pending_signals().
636 * WARNING: we cannot use sigfillset() here because the uc_sigmask
637 * field is a kernel sigset_t, which is much smaller than the
638 * libc sigset_t which sigfillset() operates on. Using sigfillset()
639 * would write 0xff bytes off the end of the kernel-sized sigset and trash
640 * other data in the ucontext structure.
641 * We can't use sizeof(uc->uc_sigmask) either, because the libc
642 * headers define the struct field with the wrong (too large) type.
644 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
645 sigdelset(&uc->uc_sigmask, SIGSEGV);
646 sigdelset(&uc->uc_sigmask, SIGBUS);
648 /* interrupt the virtual CPU as soon as possible */
649 cpu_exit(thread_cpu);
652 /* do_sigaltstack() returns target values and errnos. */
653 /* compare linux/kernel/signal.c:do_sigaltstack() */
654 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
657 struct target_sigaltstack oss;
659 /* XXX: test errors */
662 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
663 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
664 __put_user(sas_ss_flags(sp), &oss.ss_flags);
669 struct target_sigaltstack *uss;
670 struct target_sigaltstack ss;
671 size_t minstacksize = TARGET_MINSIGSTKSZ;
673 #if defined(TARGET_PPC64)
674 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
675 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
676 if (get_ppc64_abi(image) > 1) {
681 ret = -TARGET_EFAULT;
682 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
685 __get_user(ss.ss_sp, &uss->ss_sp);
686 __get_user(ss.ss_size, &uss->ss_size);
687 __get_user(ss.ss_flags, &uss->ss_flags);
688 unlock_user_struct(uss, uss_addr, 0);
691 if (on_sig_stack(sp))
694 ret = -TARGET_EINVAL;
695 if (ss.ss_flags != TARGET_SS_DISABLE
696 && ss.ss_flags != TARGET_SS_ONSTACK
700 if (ss.ss_flags == TARGET_SS_DISABLE) {
704 ret = -TARGET_ENOMEM;
705 if (ss.ss_size < minstacksize) {
710 target_sigaltstack_used.ss_sp = ss.ss_sp;
711 target_sigaltstack_used.ss_size = ss.ss_size;
715 ret = -TARGET_EFAULT;
716 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
725 /* do_sigaction() returns target values and host errnos. */
726 int do_sigaction(int sig, const struct target_sigaction *act,
727 struct target_sigaction *oact)
729 struct target_sigaction *k;
730 struct sigaction act1;
734 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
735 return -TARGET_EINVAL;
738 if (block_signals()) {
739 return -TARGET_ERESTARTSYS;
742 k = &sigact_table[sig - 1];
744 __put_user(k->_sa_handler, &oact->_sa_handler);
745 __put_user(k->sa_flags, &oact->sa_flags);
746 #if !defined(TARGET_MIPS)
747 __put_user(k->sa_restorer, &oact->sa_restorer);
750 oact->sa_mask = k->sa_mask;
753 /* FIXME: This is not threadsafe. */
754 __get_user(k->_sa_handler, &act->_sa_handler);
755 __get_user(k->sa_flags, &act->sa_flags);
756 #if !defined(TARGET_MIPS)
757 __get_user(k->sa_restorer, &act->sa_restorer);
759 /* To be swapped in target_to_host_sigset. */
760 k->sa_mask = act->sa_mask;
762 /* we update the host linux signal state */
763 host_sig = target_to_host_signal(sig);
764 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
765 sigfillset(&act1.sa_mask);
766 act1.sa_flags = SA_SIGINFO;
767 if (k->sa_flags & TARGET_SA_RESTART)
768 act1.sa_flags |= SA_RESTART;
769 /* NOTE: it is important to update the host kernel signal
770 ignore state to avoid getting unexpected interrupted syscalls. */
772 if (k->_sa_handler == TARGET_SIG_IGN) {
773 act1.sa_sigaction = (void *)SIG_IGN;
774 } else if (k->_sa_handler == TARGET_SIG_DFL) {
775 if (fatal_signal (sig))
776 act1.sa_sigaction = host_signal_handler;
778 act1.sa_sigaction = (void *)SIG_DFL;
780 act1.sa_sigaction = host_signal_handler;
782 ret = sigaction(host_sig, &act1, NULL);
788 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
790 /* from the Linux kernel */
792 struct target_fpreg {
793 uint16_t significand[4];
797 struct target_fpxreg {
798 uint16_t significand[4];
803 struct target_xmmreg {
804 abi_ulong element[4];
807 struct target_fpstate {
808 /* Regular FPU environment */
816 struct target_fpreg _st[8];
818 uint16_t magic; /* 0xffff = regular FPU data only */
820 /* FXSR FPU environment */
821 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
824 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
825 struct target_xmmreg _xmm[8];
826 abi_ulong padding[56];
829 #define X86_FXSR_MAGIC 0x0000
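/* Following the kernel's convention: a magic of 0xffff in the fpstate means
   only the legacy FPU environment above is valid, while X86_FXSR_MAGIC (0)
   marks frames that also carry the FXSR/SSE state. */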
831 struct target_sigcontext {
849 abi_ulong esp_at_signal;
851 abi_ulong fpstate; /* pointer */
856 struct target_ucontext {
859 target_stack_t tuc_stack;
860 struct target_sigcontext tuc_mcontext;
861 target_sigset_t tuc_sigmask; /* mask last for extensibility */
868 struct target_sigcontext sc;
869 struct target_fpstate fpstate;
870 abi_ulong extramask[TARGET_NSIG_WORDS-1];
880 struct target_siginfo info;
881 struct target_ucontext uc;
882 struct target_fpstate fpstate;
887 * Set up a signal frame.
890 /* XXX: save x87 state */
891 static void setup_sigcontext(struct target_sigcontext *sc,
892 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
893 abi_ulong fpstate_addr)
895 CPUState *cs = CPU(x86_env_get_cpu(env));
898 /* already locked in setup_frame() */
899 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
900 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
901 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
902 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
903 __put_user(env->regs[R_EDI], &sc->edi);
904 __put_user(env->regs[R_ESI], &sc->esi);
905 __put_user(env->regs[R_EBP], &sc->ebp);
906 __put_user(env->regs[R_ESP], &sc->esp);
907 __put_user(env->regs[R_EBX], &sc->ebx);
908 __put_user(env->regs[R_EDX], &sc->edx);
909 __put_user(env->regs[R_ECX], &sc->ecx);
910 __put_user(env->regs[R_EAX], &sc->eax);
911 __put_user(cs->exception_index, &sc->trapno);
912 __put_user(env->error_code, &sc->err);
913 __put_user(env->eip, &sc->eip);
914 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
915 __put_user(env->eflags, &sc->eflags);
916 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
917 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
919 cpu_x86_fsave(env, fpstate_addr, 1);
920 fpstate->status = fpstate->sw;
922 __put_user(magic, &fpstate->magic);
923 __put_user(fpstate_addr, &sc->fpstate);
925 /* non-iBCS2 extensions.. */
926 __put_user(mask, &sc->oldmask);
927 __put_user(env->cr[2], &sc->cr2);
931 * Determine which stack to use..
934 static inline abi_ulong
935 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
939 /* Default to using normal stack */
940 esp = env->regs[R_ESP];
941 /* This is the X/Open sanctioned signal stack switching. */
942 if (ka->sa_flags & TARGET_SA_ONSTACK) {
943 if (sas_ss_flags(esp) == 0) {
944 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
948 /* This is the legacy signal stack switching. */
949 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
950 !(ka->sa_flags & TARGET_SA_RESTORER) &&
952 esp = (unsigned long) ka->sa_restorer;
955 return (esp - frame_size) & -8ul;
958 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
959 static void setup_frame(int sig, struct target_sigaction *ka,
960 target_sigset_t *set, CPUX86State *env)
962 abi_ulong frame_addr;
963 struct sigframe *frame;
966 frame_addr = get_sigframe(ka, env, sizeof(*frame));
967 trace_user_setup_frame(env, frame_addr);
969 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
972 __put_user(sig, &frame->sig);
974 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
975 frame_addr + offsetof(struct sigframe, fpstate));
977 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
978 __put_user(set->sig[i], &frame->extramask[i - 1]);
981 /* Set up to return from userspace. If provided, use a stub
982 already in userspace. */
983 if (ka->sa_flags & TARGET_SA_RESTORER) {
984 __put_user(ka->sa_restorer, &frame->pretcode);
987 abi_ulong retcode_addr;
988 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
989 __put_user(retcode_addr, &frame->pretcode);
990 /* This is popl %eax ; movl $,%eax ; int $0x80 */
992 __put_user(val16, (uint16_t *)(frame->retcode+0));
993 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
995 __put_user(val16, (uint16_t *)(frame->retcode+6));
999 /* Set up registers for signal handler */
1000 env->regs[R_ESP] = frame_addr;
1001 env->eip = ka->_sa_handler;
1003 cpu_x86_load_seg(env, R_DS, __USER_DS);
1004 cpu_x86_load_seg(env, R_ES, __USER_DS);
1005 cpu_x86_load_seg(env, R_SS, __USER_DS);
1006 cpu_x86_load_seg(env, R_CS, __USER_CS);
1007 env->eflags &= ~TF_MASK;
1009 unlock_user_struct(frame, frame_addr, 1);
1014 if (sig == TARGET_SIGSEGV) {
1015 ka->_sa_handler = TARGET_SIG_DFL;
1017 force_sig(TARGET_SIGSEGV /* , current */);
1020 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
1021 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1022 target_siginfo_t *info,
1023 target_sigset_t *set, CPUX86State *env)
1025 abi_ulong frame_addr, addr;
1026 struct rt_sigframe *frame;
1029 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1030 trace_user_setup_rt_frame(env, frame_addr);
1032 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1035 __put_user(sig, &frame->sig);
1036 addr = frame_addr + offsetof(struct rt_sigframe, info);
1037 __put_user(addr, &frame->pinfo);
1038 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1039 __put_user(addr, &frame->puc);
1040 tswap_siginfo(&frame->info, info);
1042 /* Create the ucontext. */
1043 __put_user(0, &frame->uc.tuc_flags);
1044 __put_user(0, &frame->uc.tuc_link);
1045 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1046 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1047 &frame->uc.tuc_stack.ss_flags);
1048 __put_user(target_sigaltstack_used.ss_size,
1049 &frame->uc.tuc_stack.ss_size);
1050 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1051 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1053 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1054 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1057 /* Set up to return from userspace. If provided, use a stub
1058 already in userspace. */
1059 if (ka->sa_flags & TARGET_SA_RESTORER) {
1060 __put_user(ka->sa_restorer, &frame->pretcode);
1063 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1064 __put_user(addr, &frame->pretcode);
1065 /* This is movl $,%eax ; int $0x80 */
1066 __put_user(0xb8, (char *)(frame->retcode+0));
1067 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1069 __put_user(val16, (uint16_t *)(frame->retcode+5));
1072 /* Set up registers for signal handler */
1073 env->regs[R_ESP] = frame_addr;
1074 env->eip = ka->_sa_handler;
1076 cpu_x86_load_seg(env, R_DS, __USER_DS);
1077 cpu_x86_load_seg(env, R_ES, __USER_DS);
1078 cpu_x86_load_seg(env, R_SS, __USER_DS);
1079 cpu_x86_load_seg(env, R_CS, __USER_CS);
1080 env->eflags &= ~TF_MASK;
1082 unlock_user_struct(frame, frame_addr, 1);
1087 if (sig == TARGET_SIGSEGV) {
1088 ka->_sa_handler = TARGET_SIG_DFL;
1090 force_sig(TARGET_SIGSEGV /* , current */);
1094 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1096 unsigned int err = 0;
1097 abi_ulong fpstate_addr;
1098 unsigned int tmpflags;
1100 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1101 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1102 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1103 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1105 env->regs[R_EDI] = tswapl(sc->edi);
1106 env->regs[R_ESI] = tswapl(sc->esi);
1107 env->regs[R_EBP] = tswapl(sc->ebp);
1108 env->regs[R_ESP] = tswapl(sc->esp);
1109 env->regs[R_EBX] = tswapl(sc->ebx);
1110 env->regs[R_EDX] = tswapl(sc->edx);
1111 env->regs[R_ECX] = tswapl(sc->ecx);
1112 env->regs[R_EAX] = tswapl(sc->eax);
1113 env->eip = tswapl(sc->eip);
1115 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1116 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1118 tmpflags = tswapl(sc->eflags);
1119 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
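/* 0x40DD5 restricts the restore to CF, PF, AF, ZF, SF, TF, DF, OF and AC
   (the kernel's FIX_EFLAGS mask); privileged bits such as IOPL keep their
   current values. */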
1120 // regs->orig_eax = -1; /* disable syscall checks */
1122 fpstate_addr = tswapl(sc->fpstate);
1123 if (fpstate_addr != 0) {
1124 if (!access_ok(VERIFY_READ, fpstate_addr,
1125 sizeof(struct target_fpstate)))
1127 cpu_x86_frstor(env, fpstate_addr, 1);
1135 long do_sigreturn(CPUX86State *env)
1137 struct sigframe *frame;
1138 abi_ulong frame_addr = env->regs[R_ESP] - 8;
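/* By the time the trampoline issues int $0x80, the handler's 'ret' has popped
   pretcode and the trampoline's 'popl %eax' has popped sig, so ESP points
   8 bytes past the start of the original frame. */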
1139 target_sigset_t target_set;
1143 trace_user_do_sigreturn(env, frame_addr);
1144 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1146 /* set blocked signals */
1147 __get_user(target_set.sig[0], &frame->sc.oldmask);
1148 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1149 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1152 target_to_host_sigset_internal(&set, &target_set);
1155 /* restore registers */
1156 if (restore_sigcontext(env, &frame->sc))
1158 unlock_user_struct(frame, frame_addr, 0);
1159 return -TARGET_QEMU_ESIGRETURN;
1162 unlock_user_struct(frame, frame_addr, 0);
1163 force_sig(TARGET_SIGSEGV);
1167 long do_rt_sigreturn(CPUX86State *env)
1169 abi_ulong frame_addr;
1170 struct rt_sigframe *frame;
1173 frame_addr = env->regs[R_ESP] - 4;
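/* The rt trampoline ("movl $nr,%eax; int $0x80") pops nothing itself, so only
   the 4-byte pretcode has been consumed by the handler's 'ret'. */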
1174 trace_user_do_rt_sigreturn(env, frame_addr);
1175 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1177 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1180 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1184 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1185 get_sp_from_cpustate(env)) == -EFAULT) {
1189 unlock_user_struct(frame, frame_addr, 0);
1190 return -TARGET_QEMU_ESIGRETURN;
1193 unlock_user_struct(frame, frame_addr, 0);
1194 force_sig(TARGET_SIGSEGV);
1198 #elif defined(TARGET_AARCH64)
1200 struct target_sigcontext {
1201 uint64_t fault_address;
1202 /* AArch64 registers */
1207 /* 4K reserved for FP/SIMD state and future expansion */
1208 char __reserved[4096] __attribute__((__aligned__(16)));
1211 struct target_ucontext {
1212 abi_ulong tuc_flags;
1214 target_stack_t tuc_stack;
1215 target_sigset_t tuc_sigmask;
1216 /* glibc uses a 1024-bit sigset_t */
1217 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1218 /* last for future expansion */
1219 struct target_sigcontext tuc_mcontext;
1223 * Header to be used at the beginning of structures extending the user
1224 * context. Such structures must be placed after the rt_sigframe on the stack
1225 * and be 16-byte aligned. The last structure must be a dummy one with the
1226 * magic and size set to 0.
1228 struct target_aarch64_ctx {
1233 #define TARGET_FPSIMD_MAGIC 0x46508001
1235 struct target_fpsimd_context {
1236 struct target_aarch64_ctx head;
1239 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1243 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1244 * user space as it will change with the addition of new context. User space
1245 * should check the magic/size information.
1247 struct target_aux_context {
1248 struct target_fpsimd_context fpsimd;
1249 /* additional context to be added before "end" */
1250 struct target_aarch64_ctx end;
1253 struct target_rt_sigframe {
1254 struct target_siginfo info;
1255 struct target_ucontext uc;
1261 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1262 CPUARMState *env, target_sigset_t *set)
1265 struct target_aux_context *aux =
1266 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1268 /* set up the stack frame for unwinding */
1269 __put_user(env->xregs[29], &sf->fp);
1270 __put_user(env->xregs[30], &sf->lr);
1272 for (i = 0; i < 31; i++) {
1273 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1275 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1276 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1277 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1279 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1281 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1282 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1285 for (i = 0; i < 32; i++) {
1286 #ifdef TARGET_WORDS_BIGENDIAN
1287 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1288 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1290 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1291 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1294 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1295 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1296 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1297 __put_user(sizeof(struct target_fpsimd_context),
1298 &aux->fpsimd.head.size);
1300 /* set the "end" magic */
1301 __put_user(0, &aux->end.magic);
1302 __put_user(0, &aux->end.size);
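/* Per the target_aarch64_ctx comment above, a record with both magic and size
   zero terminates the list of extension records in __reserved. */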
1307 static int target_restore_sigframe(CPUARMState *env,
1308 struct target_rt_sigframe *sf)
1312 struct target_aux_context *aux =
1313 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1314 uint32_t magic, size, fpsr, fpcr;
1317 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1320 for (i = 0; i < 31; i++) {
1321 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1324 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1325 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1326 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1327 pstate_write(env, pstate);
1329 __get_user(magic, &aux->fpsimd.head.magic);
1330 __get_user(size, &aux->fpsimd.head.size);
1332 if (magic != TARGET_FPSIMD_MAGIC
1333 || size != sizeof(struct target_fpsimd_context)) {
1337 for (i = 0; i < 32; i++) {
1338 #ifdef TARGET_WORDS_BIGENDIAN
1339 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1340 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1342 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1343 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1346 __get_user(fpsr, &aux->fpsimd.fpsr);
1347 vfp_set_fpsr(env, fpsr);
1348 __get_user(fpcr, &aux->fpsimd.fpcr);
1349 vfp_set_fpcr(env, fpcr);
1354 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1358 sp = env->xregs[31];
1361 * This is the X/Open sanctioned signal stack switching.
1363 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1364 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1367 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
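/* AAPCS64 requires SP to be 16-byte aligned at any public interface, hence
   the "& ~15" above. */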
1372 static void target_setup_frame(int usig, struct target_sigaction *ka,
1373 target_siginfo_t *info, target_sigset_t *set,
1376 struct target_rt_sigframe *frame;
1377 abi_ulong frame_addr, return_addr;
1379 frame_addr = get_sigframe(ka, env);
1380 trace_user_setup_frame(env, frame_addr);
1381 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1385 __put_user(0, &frame->uc.tuc_flags);
1386 __put_user(0, &frame->uc.tuc_link);
1388 __put_user(target_sigaltstack_used.ss_sp,
1389 &frame->uc.tuc_stack.ss_sp);
1390 __put_user(sas_ss_flags(env->xregs[31]),
1391 &frame->uc.tuc_stack.ss_flags);
1392 __put_user(target_sigaltstack_used.ss_size,
1393 &frame->uc.tuc_stack.ss_size);
1394 target_setup_sigframe(frame, env, set);
1395 if (ka->sa_flags & TARGET_SA_RESTORER) {
1396 return_addr = ka->sa_restorer;
1398 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1399 __put_user(0xd2801168, &frame->tramp[0]);
1400 __put_user(0xd4000001, &frame->tramp[1]);
1401 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1403 env->xregs[0] = usig;
1404 env->xregs[31] = frame_addr;
1405 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1406 env->pc = ka->_sa_handler;
1407 env->xregs[30] = return_addr;
1409 tswap_siginfo(&frame->info, info);
1410 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1411 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1414 unlock_user_struct(frame, frame_addr, 1);
1418 unlock_user_struct(frame, frame_addr, 1);
1419 force_sig(TARGET_SIGSEGV);
1422 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1423 target_siginfo_t *info, target_sigset_t *set,
1426 target_setup_frame(sig, ka, info, set, env);
1429 static void setup_frame(int sig, struct target_sigaction *ka,
1430 target_sigset_t *set, CPUARMState *env)
1432 target_setup_frame(sig, ka, 0, set, env);
1435 long do_rt_sigreturn(CPUARMState *env)
1437 struct target_rt_sigframe *frame = NULL;
1438 abi_ulong frame_addr = env->xregs[31];
1440 trace_user_do_rt_sigreturn(env, frame_addr);
1441 if (frame_addr & 15) {
1445 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1449 if (target_restore_sigframe(env, frame)) {
1453 if (do_sigaltstack(frame_addr +
1454 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1455 0, get_sp_from_cpustate(env)) == -EFAULT) {
1459 unlock_user_struct(frame, frame_addr, 0);
1460 return -TARGET_QEMU_ESIGRETURN;
1463 unlock_user_struct(frame, frame_addr, 0);
1464 force_sig(TARGET_SIGSEGV);
1468 long do_sigreturn(CPUARMState *env)
1470 return do_rt_sigreturn(env);
1473 #elif defined(TARGET_ARM)
1475 struct target_sigcontext {
1477 abi_ulong error_code;
1496 abi_ulong fault_address;
1499 struct target_ucontext_v1 {
1500 abi_ulong tuc_flags;
1502 target_stack_t tuc_stack;
1503 struct target_sigcontext tuc_mcontext;
1504 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1507 struct target_ucontext_v2 {
1508 abi_ulong tuc_flags;
1510 target_stack_t tuc_stack;
1511 struct target_sigcontext tuc_mcontext;
1512 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1513 char __unused[128 - sizeof(target_sigset_t)];
1514 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1517 struct target_user_vfp {
1518 uint64_t fpregs[32];
1522 struct target_user_vfp_exc {
1528 struct target_vfp_sigframe {
1531 struct target_user_vfp ufp;
1532 struct target_user_vfp_exc ufp_exc;
1533 } __attribute__((__aligned__(8)));
1535 struct target_iwmmxt_sigframe {
1539 /* Note that not all the coprocessor control registers are stored here */
1546 } __attribute__((__aligned__(8)));
1548 #define TARGET_VFP_MAGIC 0x56465001
1549 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1553 struct target_sigcontext sc;
1554 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1560 struct target_ucontext_v2 uc;
1564 struct rt_sigframe_v1
1568 struct target_siginfo info;
1569 struct target_ucontext_v1 uc;
1573 struct rt_sigframe_v2
1575 struct target_siginfo info;
1576 struct target_ucontext_v2 uc;
1580 #define TARGET_CONFIG_CPU_32 1
1583 * For ARM syscalls, we encode the syscall number into the instruction.
1585 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1586 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1589 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1590 * need two 16-bit instructions.
1592 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1593 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
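/* In each 32-bit word the low halfword 0x27NN encodes "movs r7, #NN" (the
   syscall number) and the high halfword 0xdf00 encodes "svc 0". */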
1595 static const abi_ulong retcodes[4] = {
1596 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1597 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1601 static inline int valid_user_regs(CPUARMState *regs)
1607 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1608 CPUARMState *env, abi_ulong mask)
1610 __put_user(env->regs[0], &sc->arm_r0);
1611 __put_user(env->regs[1], &sc->arm_r1);
1612 __put_user(env->regs[2], &sc->arm_r2);
1613 __put_user(env->regs[3], &sc->arm_r3);
1614 __put_user(env->regs[4], &sc->arm_r4);
1615 __put_user(env->regs[5], &sc->arm_r5);
1616 __put_user(env->regs[6], &sc->arm_r6);
1617 __put_user(env->regs[7], &sc->arm_r7);
1618 __put_user(env->regs[8], &sc->arm_r8);
1619 __put_user(env->regs[9], &sc->arm_r9);
1620 __put_user(env->regs[10], &sc->arm_r10);
1621 __put_user(env->regs[11], &sc->arm_fp);
1622 __put_user(env->regs[12], &sc->arm_ip);
1623 __put_user(env->regs[13], &sc->arm_sp);
1624 __put_user(env->regs[14], &sc->arm_lr);
1625 __put_user(env->regs[15], &sc->arm_pc);
1626 #ifdef TARGET_CONFIG_CPU_32
1627 __put_user(cpsr_read(env), &sc->arm_cpsr);
1630 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1631 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1632 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1633 __put_user(mask, &sc->oldmask);
1636 static inline abi_ulong
1637 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1639 unsigned long sp = regs->regs[13];
1642 * This is the X/Open sanctioned signal stack switching.
1644 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1645 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1648 * ATPCS B01 mandates 8-byte alignment
1650 return (sp - framesize) & ~7;
1654 setup_return(CPUARMState *env, struct target_sigaction *ka,
1655 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1657 abi_ulong handler = ka->_sa_handler;
1659 int thumb = handler & 1;
1660 uint32_t cpsr = cpsr_read(env);
1669 if (ka->sa_flags & TARGET_SA_RESTORER) {
1670 retcode = ka->sa_restorer;
1672 unsigned int idx = thumb;
1674 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1678 __put_user(retcodes[idx], rc);
1680 retcode = rc_addr + thumb;
1683 env->regs[0] = usig;
1684 env->regs[13] = frame_addr;
1685 env->regs[14] = retcode;
1686 env->regs[15] = handler & (thumb ? ~1 : ~3);
1687 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1690 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1693 struct target_vfp_sigframe *vfpframe;
1694 vfpframe = (struct target_vfp_sigframe *)regspace;
1695 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1696 __put_user(sizeof(*vfpframe), &vfpframe->size);
1697 for (i = 0; i < 32; i++) {
1698 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1700 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1701 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1702 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1703 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1704 return (abi_ulong*)(vfpframe+1);
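/* Note: returns a pointer just past the VFP record so setup_sigframe_v2() can
   append the next coprocessor block (or the terminating zero word) after it. */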
1707 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1711 struct target_iwmmxt_sigframe *iwmmxtframe;
1712 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1713 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1714 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1715 for (i = 0; i < 16; i++) {
1716 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1718 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1719 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1720 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1721 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1722 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1723 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1724 return (abi_ulong*)(iwmmxtframe+1);
1727 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1728 target_sigset_t *set, CPUARMState *env)
1730 struct target_sigaltstack stack;
1732 abi_ulong *regspace;
1734 /* Clear all the bits of the ucontext we don't use. */
1735 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1737 memset(&stack, 0, sizeof(stack));
1738 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1739 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1740 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1741 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1743 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1744 /* Save coprocessor signal frame. */
1745 regspace = uc->tuc_regspace;
1746 if (arm_feature(env, ARM_FEATURE_VFP)) {
1747 regspace = setup_sigframe_v2_vfp(regspace, env);
1749 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1750 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1753 /* Write terminating magic word */
1754 __put_user(0, regspace);
1756 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1757 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1761 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1762 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1763 target_sigset_t *set, CPUARMState *regs)
1765 struct sigframe_v1 *frame;
1766 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1769 trace_user_setup_frame(regs, frame_addr);
1770 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1774 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1776 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1777 __put_user(set->sig[i], &frame->extramask[i - 1]);
1780 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1781 frame_addr + offsetof(struct sigframe_v1, retcode));
1783 unlock_user_struct(frame, frame_addr, 1);
1786 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1787 target_sigset_t *set, CPUARMState *regs)
1789 struct sigframe_v2 *frame;
1790 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1792 trace_user_setup_frame(regs, frame_addr);
1793 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1797 setup_sigframe_v2(&frame->uc, set, regs);
1799 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1800 frame_addr + offsetof(struct sigframe_v2, retcode));
1802 unlock_user_struct(frame, frame_addr, 1);
1805 static void setup_frame(int usig, struct target_sigaction *ka,
1806 target_sigset_t *set, CPUARMState *regs)
1808 if (get_osversion() >= 0x020612) {
1809 setup_frame_v2(usig, ka, set, regs);
1811 setup_frame_v1(usig, ka, set, regs);
1815 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1816 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1817 target_siginfo_t *info,
1818 target_sigset_t *set, CPUARMState *env)
1820 struct rt_sigframe_v1 *frame;
1821 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1822 struct target_sigaltstack stack;
1824 abi_ulong info_addr, uc_addr;
1826 trace_user_setup_rt_frame(env, frame_addr);
1827 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1831 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1832 __put_user(info_addr, &frame->pinfo);
1833 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1834 __put_user(uc_addr, &frame->puc);
1835 tswap_siginfo(&frame->info, info);
1837 /* Clear all the bits of the ucontext we don't use. */
1838 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1840 memset(&stack, 0, sizeof(stack));
1841 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1842 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1843 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1844 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1846 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1847 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1848 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1851 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1852 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1854 env->regs[1] = info_addr;
1855 env->regs[2] = uc_addr;
1857 unlock_user_struct(frame, frame_addr, 1);
1860 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1861 target_siginfo_t *info,
1862 target_sigset_t *set, CPUARMState *env)
1864 struct rt_sigframe_v2 *frame;
1865 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1866 abi_ulong info_addr, uc_addr;
1868 trace_user_setup_rt_frame(env, frame_addr);
1869 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1873 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1874 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1875 tswap_siginfo(&frame->info, info);
1877 setup_sigframe_v2(&frame->uc, set, env);
1879 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1880 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1882 env->regs[1] = info_addr;
1883 env->regs[2] = uc_addr;
1885 unlock_user_struct(frame, frame_addr, 1);
1888 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1889 target_siginfo_t *info,
1890 target_sigset_t *set, CPUARMState *env)
1892 if (get_osversion() >= 0x020612) {
1893 setup_rt_frame_v2(usig, ka, info, set, env);
1895 setup_rt_frame_v1(usig, ka, info, set, env);
1900 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1905 __get_user(env->regs[0], &sc->arm_r0);
1906 __get_user(env->regs[1], &sc->arm_r1);
1907 __get_user(env->regs[2], &sc->arm_r2);
1908 __get_user(env->regs[3], &sc->arm_r3);
1909 __get_user(env->regs[4], &sc->arm_r4);
1910 __get_user(env->regs[5], &sc->arm_r5);
1911 __get_user(env->regs[6], &sc->arm_r6);
1912 __get_user(env->regs[7], &sc->arm_r7);
1913 __get_user(env->regs[8], &sc->arm_r8);
1914 __get_user(env->regs[9], &sc->arm_r9);
1915 __get_user(env->regs[10], &sc->arm_r10);
1916 __get_user(env->regs[11], &sc->arm_fp);
1917 __get_user(env->regs[12], &sc->arm_ip);
1918 __get_user(env->regs[13], &sc->arm_sp);
1919 __get_user(env->regs[14], &sc->arm_lr);
1920 __get_user(env->regs[15], &sc->arm_pc);
1921 #ifdef TARGET_CONFIG_CPU_32
1922 __get_user(cpsr, &sc->arm_cpsr);
1923 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
1926 err |= !valid_user_regs(env);
1931 static long do_sigreturn_v1(CPUARMState *env)
1933 abi_ulong frame_addr;
1934 struct sigframe_v1 *frame = NULL;
1935 target_sigset_t set;
1940 * Since we stacked the signal on a 64-bit boundary,
1941 * then 'sp' should be word aligned here. If it's
1942 * not, then the user is trying to mess with us.
1944 frame_addr = env->regs[13];
1945 trace_user_do_sigreturn(env, frame_addr);
1946 if (frame_addr & 7) {
1950 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1954 __get_user(set.sig[0], &frame->sc.oldmask);
1955 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1956 __get_user(set.sig[i], &frame->extramask[i - 1]);
1959 target_to_host_sigset_internal(&host_set, &set);
1960 set_sigmask(&host_set);
1962 if (restore_sigcontext(env, &frame->sc)) {
1967 /* Send SIGTRAP if we're single-stepping */
1968 if (ptrace_cancel_bpt(current))
1969 send_sig(SIGTRAP, current, 1);
1971 unlock_user_struct(frame, frame_addr, 0);
1972 return -TARGET_QEMU_ESIGRETURN;
1975 force_sig(TARGET_SIGSEGV /* , current */);
1979 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1982 abi_ulong magic, sz;
1983 uint32_t fpscr, fpexc;
1984 struct target_vfp_sigframe *vfpframe;
1985 vfpframe = (struct target_vfp_sigframe *)regspace;
1987 __get_user(magic, &vfpframe->magic);
1988 __get_user(sz, &vfpframe->size);
1989 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1992 for (i = 0; i < 32; i++) {
1993 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1995 __get_user(fpscr, &vfpframe->ufp.fpscr);
1996 vfp_set_fpscr(env, fpscr);
1997 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1998 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
1999 * and the exception flag is cleared
2000 */
2001 fpexc |= (1 << 30);
2002 fpexc &= ~((1 << 31) | (1 << 28));
2003 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
2004 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
2005 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
2006 return (abi_ulong*)(vfpframe + 1);
2009 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2010 abi_ulong *regspace)
2013 abi_ulong magic, sz;
2014 struct target_iwmmxt_sigframe *iwmmxtframe;
2015 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2017 __get_user(magic, &iwmmxtframe->magic);
2018 __get_user(sz, &iwmmxtframe->size);
2019 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2022 for (i = 0; i < 16; i++) {
2023 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2025 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2026 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
2027 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2028 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2029 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2030 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2031 return (abi_ulong*)(iwmmxtframe + 1);
2034 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
2035 struct target_ucontext_v2 *uc)
2038 abi_ulong *regspace;
2040 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2041 set_sigmask(&host_set);
2043 if (restore_sigcontext(env, &uc->tuc_mcontext))
2046 /* Restore coprocessor signal frame */
2047 regspace = uc->tuc_regspace;
2048 if (arm_feature(env, ARM_FEATURE_VFP)) {
2049 regspace = restore_sigframe_v2_vfp(env, regspace);
2054 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2055 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2061 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2065 /* Send SIGTRAP if we're single-stepping */
2066 if (ptrace_cancel_bpt(current))
2067 send_sig(SIGTRAP, current, 1);
2073 static long do_sigreturn_v2(CPUARMState *env)
2075 abi_ulong frame_addr;
2076 struct sigframe_v2 *frame = NULL;
2079 * Since we stacked the signal on a 64-bit boundary,
2080 * then 'sp' should be word aligned here. If it's
2081 * not, then the user is trying to mess with us.
2083 frame_addr = env->regs[13];
2084 trace_user_do_sigreturn(env, frame_addr);
2085 if (frame_addr & 7) {
2089 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2093 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2097 unlock_user_struct(frame, frame_addr, 0);
2098 return -TARGET_QEMU_ESIGRETURN;
2101 unlock_user_struct(frame, frame_addr, 0);
2102 force_sig(TARGET_SIGSEGV /* , current */);
2106 long do_sigreturn(CPUARMState *env)
2108 if (get_osversion() >= 0x020612) {
2109 return do_sigreturn_v2(env);
2111 return do_sigreturn_v1(env);
2115 static long do_rt_sigreturn_v1(CPUARMState *env)
2117 abi_ulong frame_addr;
2118 struct rt_sigframe_v1 *frame = NULL;
2122 * Since we stacked the signal on a 64-bit boundary,
2123 * then 'sp' should be word aligned here. If it's
2124 * not, then the user is trying to mess with us.
2126 frame_addr = env->regs[13];
2127 trace_user_do_rt_sigreturn(env, frame_addr);
2128 if (frame_addr & 7) {
2132 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2136 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2137 set_sigmask(&host_set);
2139 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2143 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2147 /* Send SIGTRAP if we're single-stepping */
2148 if (ptrace_cancel_bpt(current))
2149 send_sig(SIGTRAP, current, 1);
2151 unlock_user_struct(frame, frame_addr, 0);
2152 return -TARGET_QEMU_ESIGRETURN;
2155 unlock_user_struct(frame, frame_addr, 0);
2156 force_sig(TARGET_SIGSEGV /* , current */);
2160 static long do_rt_sigreturn_v2(CPUARMState *env)
2162 abi_ulong frame_addr;
2163 struct rt_sigframe_v2 *frame = NULL;
2166 * Since we stacked the signal on a 64-bit boundary,
2167 * then 'sp' should be word aligned here. If it's
2168 * not, then the user is trying to mess with us.
2170 frame_addr = env->regs[13];
2171 trace_user_do_rt_sigreturn(env, frame_addr);
2172 if (frame_addr & 7) {
2176 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2180 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2184 unlock_user_struct(frame, frame_addr, 0);
2185 return -TARGET_QEMU_ESIGRETURN;
2188 unlock_user_struct(frame, frame_addr, 0);
2189 force_sig(TARGET_SIGSEGV /* , current */);
2193 long do_rt_sigreturn(CPUARMState *env)
2195 if (get_osversion() >= 0x020612) {
2196 return do_rt_sigreturn_v2(env);
2198 return do_rt_sigreturn_v1(env);
2202 #elif defined(TARGET_SPARC)
2204 #define __SUNOS_MAXWIN 31
2206 /* This is what SunOS does, so shall I. */
2207 struct target_sigcontext {
2208 abi_ulong sigc_onstack; /* state to restore */
2210 abi_ulong sigc_mask; /* sigmask to restore */
2211 abi_ulong sigc_sp; /* stack pointer */
2212 abi_ulong sigc_pc; /* program counter */
2213 abi_ulong sigc_npc; /* next program counter */
2214 abi_ulong sigc_psr; /* for condition codes etc */
2215 abi_ulong sigc_g1; /* User uses these two registers */
2216 abi_ulong sigc_o0; /* within the trampoline code. */
2218 /* Now comes information regarding the user's window set
2219 * at the time of the signal.
2221 abi_ulong sigc_oswins; /* outstanding windows */
2223 /* stack ptrs for each regwin buf */
2224 char *sigc_spbuf[__SUNOS_MAXWIN];
2226 /* Windows to restore after signal */
2228 abi_ulong locals[8];
2230 } sigc_wbuf[__SUNOS_MAXWIN];
2232 /* A Sparc stack frame */
2233 struct sparc_stackf {
2234 abi_ulong locals[8];
2236 /* It's simpler to treat fp and callers_pc as elements of ins[]
2237 * since we never need to access them ourselves.
2241 abi_ulong xxargs[1];
2250 abi_ulong u_regs[16]; /* globals and ins */
2256 abi_ulong si_float_regs[32];
2257 unsigned long si_fsr;
2258 unsigned long si_fpqdepth;
2260 unsigned long *insn_addr;
2263 } qemu_siginfo_fpu_t;
2266 struct target_signal_frame {
2267 struct sparc_stackf ss;
2270 abi_ulong insns[2] __attribute__ ((aligned (8)));
2271 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2272 abi_ulong extra_size; /* Should be 0 */
2273 qemu_siginfo_fpu_t fpu_state;
2275 struct target_rt_signal_frame {
2276 struct sparc_stackf ss;
2281 unsigned int insns[2];
2283 unsigned int extra_size; /* Should be 0 */
2284 qemu_siginfo_fpu_t fpu_state;
2298 #define UREG_FP UREG_I6
2299 #define UREG_SP UREG_O6
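/* In the SPARC register-window convention the frame pointer is %i6 and the
 * stack pointer is %o6 of the current window, which is what the UREG_FP and
 * UREG_SP aliases above express in terms of regwptr[] indices.
 */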
2301 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2303 unsigned long framesize)
2307 sp = env->regwptr[UREG_FP];
2309 /* This is the X/Open sanctioned signal stack switching. */
2310 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2311 if (!on_sig_stack(sp)
2312 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2313 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2316 return sp - framesize;
2320 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2324 __put_user(env->psr, &si->si_regs.psr);
2325 __put_user(env->pc, &si->si_regs.pc);
2326 __put_user(env->npc, &si->si_regs.npc);
2327 __put_user(env->y, &si->si_regs.y);
2328 for (i=0; i < 8; i++) {
2329 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2331 for (i=0; i < 8; i++) {
2332 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2334 __put_user(mask, &si->si_mask);
2340 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2341 CPUSPARCState *env, unsigned long mask)
2345 __put_user(mask, &sc->sigc_mask);
2346 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2347 __put_user(env->pc, &sc->sigc_pc);
2348 __put_user(env->npc, &sc->sigc_npc);
2349 __put_user(env->psr, &sc->sigc_psr);
2350 __put_user(env->gregs[1], &sc->sigc_g1);
2351 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2356 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
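/* NF_ALIGNEDSZ rounds the signal frame size up to the next multiple of 8 so
 * that carving it out of the stack keeps the stack pointer doubleword
 * aligned, as SPARC requires.
 */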
2358 static void setup_frame(int sig, struct target_sigaction *ka,
2359 target_sigset_t *set, CPUSPARCState *env)
2362 struct target_signal_frame *sf;
2363 int sigframe_size, err, i;
2365 /* 1. Make sure everything is clean */
2366 //synchronize_user_stack();
2368 sigframe_size = NF_ALIGNEDSZ;
2369 sf_addr = get_sigframe(ka, env, sigframe_size);
2370 trace_user_setup_frame(env, sf_addr);
2372 sf = lock_user(VERIFY_WRITE, sf_addr,
2373 sizeof(struct target_signal_frame), 0);
2378 if (invalid_frame_pointer(sf, sigframe_size))
2379 goto sigill_and_return;
2381 /* 2. Save the current process state */
2382 err = setup___siginfo(&sf->info, env, set->sig[0]);
2383 __put_user(0, &sf->extra_size);
2385 //save_fpu_state(regs, &sf->fpu_state);
2386 //__put_user(&sf->fpu_state, &sf->fpu_save);
2388 __put_user(set->sig[0], &sf->info.si_mask);
2389 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2390 __put_user(set->sig[i + 1], &sf->extramask[i]);
2393 for (i = 0; i < 8; i++) {
2394 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2396 for (i = 0; i < 8; i++) {
2397 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2402 /* 3. signal handler back-trampoline and parameters */
2403 env->regwptr[UREG_FP] = sf_addr;
2404 env->regwptr[UREG_I0] = sig;
2405 env->regwptr[UREG_I1] = sf_addr +
2406 offsetof(struct target_signal_frame, info);
2407 env->regwptr[UREG_I2] = sf_addr +
2408 offsetof(struct target_signal_frame, info);
2410 /* 4. signal handler */
2411 env->pc = ka->_sa_handler;
2412 env->npc = (env->pc + 4);
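    /* SPARC tracks both a PC and a next-PC (npc) because of delayed
     * branches; entering the handler linearly simply means npc = pc + 4.
     */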
2413 /* 5. return to kernel instructions */
2414 if (ka->sa_restorer) {
2415 env->regwptr[UREG_I7] = ka->sa_restorer;
2419 env->regwptr[UREG_I7] = sf_addr +
2420 offsetof(struct target_signal_frame, insns) - 2 * 4;
2422 /* mov __NR_sigreturn, %g1 */
2424 __put_user(val32, &sf->insns[0]);
2428 __put_user(val32, &sf->insns[1]);
2432 /* Flush instruction space. */
2433 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2436 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2440 force_sig(TARGET_SIGILL);
2443 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2444 force_sig(TARGET_SIGSEGV);
2447 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2448 target_siginfo_t *info,
2449 target_sigset_t *set, CPUSPARCState *env)
2451 fprintf(stderr, "setup_rt_frame: not implemented\n");
2454 long do_sigreturn(CPUSPARCState *env)
2457 struct target_signal_frame *sf;
2458 uint32_t up_psr, pc, npc;
2459 target_sigset_t set;
2463 sf_addr = env->regwptr[UREG_FP];
2464 trace_user_do_sigreturn(env, sf_addr);
2465 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2469 /* 1. Make sure we are not getting garbage from the user */
2474 __get_user(pc, &sf->info.si_regs.pc);
2475 __get_user(npc, &sf->info.si_regs.npc);
2477 if ((pc | npc) & 3) {
2481 /* 2. Restore the state */
2482 __get_user(up_psr, &sf->info.si_regs.psr);
2484 /* User can only change condition codes and FPU enabling in %psr. */
2485 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2486 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2490 __get_user(env->y, &sf->info.si_regs.y);
2491 for (i=0; i < 8; i++) {
2492 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2494 for (i=0; i < 8; i++) {
2495 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2498 /* FIXME: implement FPU save/restore:
2499 * __get_user(fpu_save, &sf->fpu_save);
2501 * err |= restore_fpu_state(env, fpu_save);
2504 /* This is pretty much atomic; no amount of locking would prevent
2505 * the races which exist anyway.
2507 __get_user(set.sig[0], &sf->info.si_mask);
2508 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2509 __get_user(set.sig[i], &sf->extramask[i - 1]);
2512 target_to_host_sigset_internal(&host_set, &set);
2513 set_sigmask(&host_set);
2518 unlock_user_struct(sf, sf_addr, 0);
2519 return -TARGET_QEMU_ESIGRETURN;
2522 unlock_user_struct(sf, sf_addr, 0);
2523 force_sig(TARGET_SIGSEGV);
2526 long do_rt_sigreturn(CPUSPARCState *env)
2528 trace_user_do_rt_sigreturn(env, 0);
2529 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2530 return -TARGET_ENOSYS;
2533 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2555 typedef abi_ulong target_mc_greg_t;
2556 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2558 struct target_mc_fq {
2559 abi_ulong *mcfq_addr;
2563 struct target_mc_fpu {
2567 //uint128_t qregs[16];
2569 abi_ulong mcfpu_fsr;
2570 abi_ulong mcfpu_fprs;
2571 abi_ulong mcfpu_gsr;
2572 struct target_mc_fq *mcfpu_fq;
2573 unsigned char mcfpu_qcnt;
2574 unsigned char mcfpu_qentsz;
2575 unsigned char mcfpu_enab;
2577 typedef struct target_mc_fpu target_mc_fpu_t;
2580 target_mc_gregset_t mc_gregs;
2581 target_mc_greg_t mc_fp;
2582 target_mc_greg_t mc_i7;
2583 target_mc_fpu_t mc_fpregs;
2584 } target_mcontext_t;
2586 struct target_ucontext {
2587 struct target_ucontext *tuc_link;
2588 abi_ulong tuc_flags;
2589 target_sigset_t tuc_sigmask;
2590 target_mcontext_t tuc_mcontext;
2593 /* A V9 register window */
2594 struct target_reg_window {
2595 abi_ulong locals[8];
2599 #define TARGET_STACK_BIAS 2047
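/* The SPARC V9 ABI biases the stack and frame pointers by 2047 bytes: the
 * register-window save area really lives at %sp + TARGET_STACK_BIAS, which
 * is why w_addr below is computed as TARGET_STACK_BIAS + regwptr[UREG_I6].
 */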
2601 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2602 void sparc64_set_context(CPUSPARCState *env)
2605 struct target_ucontext *ucp;
2606 target_mc_gregset_t *grp;
2607 abi_ulong pc, npc, tstate;
2608 abi_ulong fp, i7, w_addr;
2611 ucp_addr = env->regwptr[UREG_I0];
2612 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2615 grp = &ucp->tuc_mcontext.mc_gregs;
2616 __get_user(pc, &((*grp)[MC_PC]));
2617 __get_user(npc, &((*grp)[MC_NPC]));
2618 if ((pc | npc) & 3) {
2621 if (env->regwptr[UREG_I1]) {
2622 target_sigset_t target_set;
2625 if (TARGET_NSIG_WORDS == 1) {
2626 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2628 abi_ulong *src, *dst;
2629 src = ucp->tuc_sigmask.sig;
2630 dst = target_set.sig;
2631 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2632 __get_user(*dst, src);
2635 target_to_host_sigset_internal(&set, &target_set);
2640 __get_user(env->y, &((*grp)[MC_Y]));
2641 __get_user(tstate, &((*grp)[MC_TSTATE]));
2642 env->asi = (tstate >> 24) & 0xff;
2643 cpu_put_ccr(env, tstate >> 32);
2644 cpu_put_cwp64(env, tstate & 0x1f);
2645 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2646 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2647 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2648 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2649 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2650 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2651 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2652 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2653 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2654 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2655 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2656 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2657 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2658 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2659 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2661 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2662 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2664 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2665 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2669 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2673 /* FIXME this does not match how the kernel handles the FPU in
2674 * its sparc64_set_context implementation. In particular the FPU
2675 * is only restored if fenab is non-zero in:
2676 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2678 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2680 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2681 for (i = 0; i < 64; i++, src++) {
2683 __get_user(env->fpr[i/2].l.lower, src);
2685 __get_user(env->fpr[i/2].l.upper, src);
2689 __get_user(env->fsr,
2690 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2691 __get_user(env->gsr,
2692 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2693 unlock_user_struct(ucp, ucp_addr, 0);
2696 unlock_user_struct(ucp, ucp_addr, 0);
2697 force_sig(TARGET_SIGSEGV);
2700 void sparc64_get_context(CPUSPARCState *env)
2703 struct target_ucontext *ucp;
2704 target_mc_gregset_t *grp;
2705 target_mcontext_t *mcp;
2706 abi_ulong fp, i7, w_addr;
2709 target_sigset_t target_set;
2712 ucp_addr = env->regwptr[UREG_I0];
2713 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2717 mcp = &ucp->tuc_mcontext;
2718 grp = &mcp->mc_gregs;
2720 /* Skip over the trap instruction first. */
2724 /* If we're only reading the signal mask then do_sigprocmask()
2725 * is guaranteed not to fail, which is important because we don't
2726 * have any way to signal a failure or restart this operation since
2727 * this is not a normal syscall.
2729 err = do_sigprocmask(0, NULL, &set);
2731 host_to_target_sigset_internal(&target_set, &set);
2732 if (TARGET_NSIG_WORDS == 1) {
2733 __put_user(target_set.sig[0],
2734 (abi_ulong *)&ucp->tuc_sigmask);
2736 abi_ulong *src, *dst;
2737 src = target_set.sig;
2738 dst = ucp->tuc_sigmask.sig;
2739 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2740 __put_user(*src, dst);
2746 /* XXX: tstate must be saved properly */
2747 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2748 __put_user(env->pc, &((*grp)[MC_PC]));
2749 __put_user(env->npc, &((*grp)[MC_NPC]));
2750 __put_user(env->y, &((*grp)[MC_Y]));
2751 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2752 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2753 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2754 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2755 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2756 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2757 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2758 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2759 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2760 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2761 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2762 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2763 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2764 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2765 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2767 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2769 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2773 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2777 __put_user(fp, &(mcp->mc_fp));
2778 __put_user(i7, &(mcp->mc_i7));
2781 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2782 for (i = 0; i < 64; i++, dst++) {
2784 __put_user(env->fpr[i/2].l.lower, dst);
2786 __put_user(env->fpr[i/2].l.upper, dst);
2790 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2791 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2792 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2796 unlock_user_struct(ucp, ucp_addr, 1);
2799 unlock_user_struct(ucp, ucp_addr, 1);
2800 force_sig(TARGET_SIGSEGV);
2803 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2805 # if defined(TARGET_ABI_MIPSO32)
2806 struct target_sigcontext {
2807 uint32_t sc_regmask; /* Unused */
2810 uint64_t sc_regs[32];
2811 uint64_t sc_fpregs[32];
2812 uint32_t sc_ownedfp; /* Unused */
2813 uint32_t sc_fpc_csr;
2814 uint32_t sc_fpc_eir; /* Unused */
2815 uint32_t sc_used_math;
2816 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2820 target_ulong sc_hi1; /* Was sc_cause */
2821 target_ulong sc_lo1; /* Was sc_badvaddr */
2822 target_ulong sc_hi2; /* Was sc_sigset[4] */
2823 target_ulong sc_lo2;
2824 target_ulong sc_hi3;
2825 target_ulong sc_lo3;
2827 # else /* N32 || N64 */
2828 struct target_sigcontext {
2829 uint64_t sc_regs[32];
2830 uint64_t sc_fpregs[32];
2840 uint32_t sc_fpc_csr;
2841 uint32_t sc_used_math;
2843 uint32_t sc_reserved;
2848 uint32_t sf_ass[4]; /* argument save space for o32 */
2849 uint32_t sf_code[2]; /* signal trampoline */
2850 struct target_sigcontext sf_sc;
2851 target_sigset_t sf_mask;
2854 struct target_ucontext {
2855 target_ulong tuc_flags;
2856 target_ulong tuc_link;
2857 target_stack_t tuc_stack;
2859 struct target_sigcontext tuc_mcontext;
2860 target_sigset_t tuc_sigmask;
2863 struct target_rt_sigframe {
2864 uint32_t rs_ass[4]; /* argument save space for o32 */
2865 uint32_t rs_code[2]; /* signal trampoline */
2866 struct target_siginfo rs_info;
2867 struct target_ucontext rs_uc;
2870 /* Install trampoline to jump back from signal handler */
2871 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2876 * Set up the return code ...
2878 * li v0, __NR__foo_sigreturn
2882 __put_user(0x24020000 + syscall, tramp + 0);
2883 __put_user(0x0000000c , tramp + 1);
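    /* 0x24020000 | syscall encodes "addiu v0, zero, syscall" (the li above)
     * and 0x0000000c is the MIPS "syscall" instruction, giving exactly the
     * two-instruction trampoline described in the comment.
     */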
2887 static inline void setup_sigcontext(CPUMIPSState *regs,
2888 struct target_sigcontext *sc)
2892 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2893 regs->hflags &= ~MIPS_HFLAG_BMASK;
2895 __put_user(0, &sc->sc_regs[0]);
2896 for (i = 1; i < 32; ++i) {
2897 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2900 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2901 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2903 /* Rather than checking for dsp existence, always copy. The storage
2904 would just be garbage otherwise. */
2905 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2906 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2907 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2908 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2909 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2910 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2912 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2913 __put_user(dsp, &sc->sc_dsp);
2916 __put_user(1, &sc->sc_used_math);
2918 for (i = 0; i < 32; ++i) {
2919 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2924 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2928 __get_user(regs->CP0_EPC, &sc->sc_pc);
2930 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2931 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2933 for (i = 1; i < 32; ++i) {
2934 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2937 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2938 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2939 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2940 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2941 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2942 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2945 __get_user(dsp, &sc->sc_dsp);
2946 cpu_wrdsp(dsp, 0x3ff, regs);
2949 for (i = 0; i < 32; ++i) {
2950 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2955 * Determine which stack to use.
2957 static inline abi_ulong
2958 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2962 /* Default to using normal stack */
2963 sp = regs->active_tc.gpr[29];
2966 * The FPU emulator may have its own trampoline active just
2967 * above the user stack, 16 bytes before the next lowest
2968 * 16-byte boundary. Try to avoid trashing it.
2972 /* This is the X/Open sanctioned signal stack switching. */
2973 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2974 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2977 return (sp - frame_size) & ~7;
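/* On MIPS16/microMIPS-capable CPUs, bit 0 of a code address selects the
 * compressed ISA mode. The helper below copies that bit into the
 * MIPS_HFLAG_M16 hflag and clears it from PC so execution resumes at an
 * aligned address.
 */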
2980 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2982 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2983 env->hflags &= ~MIPS_HFLAG_M16;
2984 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2985 env->active_tc.PC &= ~(target_ulong) 1;
2989 # if defined(TARGET_ABI_MIPSO32)
2990 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2991 static void setup_frame(int sig, struct target_sigaction * ka,
2992 target_sigset_t *set, CPUMIPSState *regs)
2994 struct sigframe *frame;
2995 abi_ulong frame_addr;
2998 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2999 trace_user_setup_frame(regs, frame_addr);
3000 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3004 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
3006 setup_sigcontext(regs, &frame->sf_sc);
3008 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3009 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
3013 * Arguments to signal handler:
3015 * a0 = signal number
3016 * a1 = 0 (should be cause)
3017 * a2 = pointer to struct sigcontext
3019 * $25 and PC point to the signal handler, $29 points to the
3022 regs->active_tc.gpr[ 4] = sig;
3023 regs->active_tc.gpr[ 5] = 0;
3024 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3025 regs->active_tc.gpr[29] = frame_addr;
3026 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3027 /* The original kernel code sets CP0_EPC to the handler
3028 * since it returns to userland using eret;
3029 * we cannot do this here, so we must set PC directly */
3030 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3031 mips_set_hflags_isa_mode_from_pc(regs);
3032 unlock_user_struct(frame, frame_addr, 1);
3036 force_sig(TARGET_SIGSEGV/*, current*/);
3039 long do_sigreturn(CPUMIPSState *regs)
3041 struct sigframe *frame;
3042 abi_ulong frame_addr;
3044 target_sigset_t target_set;
3047 frame_addr = regs->active_tc.gpr[29];
3048 trace_user_do_sigreturn(regs, frame_addr);
3049 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3052 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3053 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3056 target_to_host_sigset_internal(&blocked, &target_set);
3057 set_sigmask(&blocked);
3059 restore_sigcontext(regs, &frame->sf_sc);
3063 * Don't let your children do this ...
3065 __asm__ __volatile__(
3073 regs->active_tc.PC = regs->CP0_EPC;
3074 mips_set_hflags_isa_mode_from_pc(regs);
3075 /* I am not sure this is right, but it seems to work;
3076 * maybe a problem with nested signals? */
3078 return -TARGET_QEMU_ESIGRETURN;
3081 force_sig(TARGET_SIGSEGV/*, current*/);
3086 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3087 target_siginfo_t *info,
3088 target_sigset_t *set, CPUMIPSState *env)
3090 struct target_rt_sigframe *frame;
3091 abi_ulong frame_addr;
3094 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3095 trace_user_setup_rt_frame(env, frame_addr);
3096 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3100 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3102 tswap_siginfo(&frame->rs_info, info);
3104 __put_user(0, &frame->rs_uc.tuc_flags);
3105 __put_user(0, &frame->rs_uc.tuc_link);
3106 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3107 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3108 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3109 &frame->rs_uc.tuc_stack.ss_flags);
3111 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3113 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3114 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3118 * Arguments to signal handler:
3120 * a0 = signal number
3121 * a1 = pointer to siginfo_t
3122 * a2 = pointer to struct ucontext
3124 * $25 and PC point to the signal handler, $29 points to the
3127 env->active_tc.gpr[ 4] = sig;
3128 env->active_tc.gpr[ 5] = frame_addr
3129 + offsetof(struct target_rt_sigframe, rs_info);
3130 env->active_tc.gpr[ 6] = frame_addr
3131 + offsetof(struct target_rt_sigframe, rs_uc);
3132 env->active_tc.gpr[29] = frame_addr;
3133 env->active_tc.gpr[31] = frame_addr
3134 + offsetof(struct target_rt_sigframe, rs_code);
3135 /* The original kernel code sets CP0_EPC to the handler
3136 * since it returns to userland using eret;
3137 * we cannot do this here, so we must set PC directly */
3138 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3139 mips_set_hflags_isa_mode_from_pc(env);
3140 unlock_user_struct(frame, frame_addr, 1);
3144 unlock_user_struct(frame, frame_addr, 1);
3145 force_sig(TARGET_SIGSEGV/*, current*/);
3148 long do_rt_sigreturn(CPUMIPSState *env)
3150 struct target_rt_sigframe *frame;
3151 abi_ulong frame_addr;
3154 frame_addr = env->active_tc.gpr[29];
3155 trace_user_do_rt_sigreturn(env, frame_addr);
3156 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3160 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3161 set_sigmask(&blocked);
3163 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3165 if (do_sigaltstack(frame_addr +
3166 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3167 0, get_sp_from_cpustate(env)) == -EFAULT)
3170 env->active_tc.PC = env->CP0_EPC;
3171 mips_set_hflags_isa_mode_from_pc(env);
3172 /* I am not sure this is right, but it seems to work;
3173 * maybe a problem with nested signals? */
3175 return -TARGET_QEMU_ESIGRETURN;
3178 force_sig(TARGET_SIGSEGV/*, current*/);
3182 #elif defined(TARGET_SH4)
3185 * code and data structures from linux kernel:
3186 * include/asm-sh/sigcontext.h
3187 * arch/sh/kernel/signal.c
3190 struct target_sigcontext {
3191 target_ulong oldmask;
3194 target_ulong sc_gregs[16];
3198 target_ulong sc_gbr;
3199 target_ulong sc_mach;
3200 target_ulong sc_macl;
3203 target_ulong sc_fpregs[16];
3204 target_ulong sc_xfpregs[16];
3205 unsigned int sc_fpscr;
3206 unsigned int sc_fpul;
3207 unsigned int sc_ownedfp;
3210 struct target_sigframe
3212 struct target_sigcontext sc;
3213 target_ulong extramask[TARGET_NSIG_WORDS-1];
3214 uint16_t retcode[3];
3218 struct target_ucontext {
3219 target_ulong tuc_flags;
3220 struct target_ucontext *tuc_link;
3221 target_stack_t tuc_stack;
3222 struct target_sigcontext tuc_mcontext;
3223 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3226 struct target_rt_sigframe
3228 struct target_siginfo info;
3229 struct target_ucontext uc;
3230 uint16_t retcode[3];
3234 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3235 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3237 static abi_ulong get_sigframe(struct target_sigaction *ka,
3238 unsigned long sp, size_t frame_size)
3240 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3241 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3244 return (sp - frame_size) & -8ul;
3247 static void setup_sigcontext(struct target_sigcontext *sc,
3248 CPUSH4State *regs, unsigned long mask)
3252 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3253 COPY(gregs[0]); COPY(gregs[1]);
3254 COPY(gregs[2]); COPY(gregs[3]);
3255 COPY(gregs[4]); COPY(gregs[5]);
3256 COPY(gregs[6]); COPY(gregs[7]);
3257 COPY(gregs[8]); COPY(gregs[9]);
3258 COPY(gregs[10]); COPY(gregs[11]);
3259 COPY(gregs[12]); COPY(gregs[13]);
3260 COPY(gregs[14]); COPY(gregs[15]);
3261 COPY(gbr); COPY(mach);
3262 COPY(macl); COPY(pr);
3266 for (i=0; i<16; i++) {
3267 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3269 __put_user(regs->fpscr, &sc->sc_fpscr);
3270 __put_user(regs->fpul, &sc->sc_fpul);
3272 /* non-iBCS2 extensions.. */
3273 __put_user(mask, &sc->oldmask);
3276 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3280 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3281 COPY(gregs[0]); COPY(gregs[1]);
3282 COPY(gregs[2]); COPY(gregs[3]);
3283 COPY(gregs[4]); COPY(gregs[5]);
3284 COPY(gregs[6]); COPY(gregs[7]);
3285 COPY(gregs[8]); COPY(gregs[9]);
3286 COPY(gregs[10]); COPY(gregs[11]);
3287 COPY(gregs[12]); COPY(gregs[13]);
3288 COPY(gregs[14]); COPY(gregs[15]);
3289 COPY(gbr); COPY(mach);
3290 COPY(macl); COPY(pr);
3294 for (i=0; i<16; i++) {
3295 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3297 __get_user(regs->fpscr, &sc->sc_fpscr);
3298 __get_user(regs->fpul, &sc->sc_fpul);
3300 regs->tra = -1; /* disable syscall checks */
3303 static void setup_frame(int sig, struct target_sigaction *ka,
3304 target_sigset_t *set, CPUSH4State *regs)
3306 struct target_sigframe *frame;
3307 abi_ulong frame_addr;
3310 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3311 trace_user_setup_frame(regs, frame_addr);
3312 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3316 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3318 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3319 __put_user(set->sig[i + 1], &frame->extramask[i]);
3322 /* Set up to return from userspace. If provided, use a stub
3323 already in userspace. */
3324 if (ka->sa_flags & TARGET_SA_RESTORER) {
3325 regs->pr = (unsigned long) ka->sa_restorer;
3327 /* Generate return code (system call to sigreturn) */
3328 abi_ulong retcode_addr = frame_addr +
3329 offsetof(struct target_sigframe, retcode);
3330 __put_user(MOVW(2), &frame->retcode[0]);
3331 __put_user(TRAP_NOARG, &frame->retcode[1]);
3332 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3333 regs->pr = (unsigned long) retcode_addr;
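        /* The generated trampoline works as follows: retcode[0] (MOVW(2))
         * loads the 16-bit syscall number stored in retcode[2] into r3, and
         * retcode[1] (TRAP_NOARG) issues the trapa that enters the kernel.
         */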
3336 /* Set up registers for signal handler */
3337 regs->gregs[15] = frame_addr;
3338 regs->gregs[4] = sig; /* Arg for signal handler */
3340 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
3341 regs->pc = (unsigned long) ka->_sa_handler;
3343 unlock_user_struct(frame, frame_addr, 1);
3347 unlock_user_struct(frame, frame_addr, 1);
3348 force_sig(TARGET_SIGSEGV);
3351 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3352 target_siginfo_t *info,
3353 target_sigset_t *set, CPUSH4State *regs)
3355 struct target_rt_sigframe *frame;
3356 abi_ulong frame_addr;
3359 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3360 trace_user_setup_rt_frame(regs, frame_addr);
3361 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3365 tswap_siginfo(&frame->info, info);
3367 /* Create the ucontext. */
3368 __put_user(0, &frame->uc.tuc_flags);
3369 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3370 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3371 &frame->uc.tuc_stack.ss_sp);
3372 __put_user(sas_ss_flags(regs->gregs[15]),
3373 &frame->uc.tuc_stack.ss_flags);
3374 __put_user(target_sigaltstack_used.ss_size,
3375 &frame->uc.tuc_stack.ss_size);
3376 setup_sigcontext(&frame->uc.tuc_mcontext,
3378 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3379 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3382 /* Set up to return from userspace. If provided, use a stub
3383 already in userspace. */
3384 if (ka->sa_flags & TARGET_SA_RESTORER) {
3385 regs->pr = (unsigned long) ka->sa_restorer;
3387 /* Generate return code (system call to sigreturn) */
3388 abi_ulong retcode_addr = frame_addr +
3389 offsetof(struct target_rt_sigframe, retcode);
3390 __put_user(MOVW(2), &frame->retcode[0]);
3391 __put_user(TRAP_NOARG, &frame->retcode[1]);
3392 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3393 regs->pr = (unsigned long) retcode_addr;
3396 /* Set up registers for signal handler */
3397 regs->gregs[15] = frame_addr;
3398 regs->gregs[4] = sig; /* Arg for signal handler */
3399 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3400 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3401 regs->pc = (unsigned long) ka->_sa_handler;
3403 unlock_user_struct(frame, frame_addr, 1);
3407 unlock_user_struct(frame, frame_addr, 1);
3408 force_sig(TARGET_SIGSEGV);
3411 long do_sigreturn(CPUSH4State *regs)
3413 struct target_sigframe *frame;
3414 abi_ulong frame_addr;
3416 target_sigset_t target_set;
3420 frame_addr = regs->gregs[15];
3421 trace_user_do_sigreturn(regs, frame_addr);
3422 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3426 __get_user(target_set.sig[0], &frame->sc.oldmask);
3427 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3428 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3434 target_to_host_sigset_internal(&blocked, &target_set);
3435 set_sigmask(&blocked);
3437 restore_sigcontext(regs, &frame->sc);
3439 unlock_user_struct(frame, frame_addr, 0);
3440 return -TARGET_QEMU_ESIGRETURN;
3443 unlock_user_struct(frame, frame_addr, 0);
3444 force_sig(TARGET_SIGSEGV);
3448 long do_rt_sigreturn(CPUSH4State *regs)
3450 struct target_rt_sigframe *frame;
3451 abi_ulong frame_addr;
3454 frame_addr = regs->gregs[15];
3455 trace_user_do_rt_sigreturn(regs, frame_addr);
3456 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3460 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3461 set_sigmask(&blocked);
3463 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3465 if (do_sigaltstack(frame_addr +
3466 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3467 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3471 unlock_user_struct(frame, frame_addr, 0);
3472 return -TARGET_QEMU_ESIGRETURN;
3475 unlock_user_struct(frame, frame_addr, 0);
3476 force_sig(TARGET_SIGSEGV);
3479 #elif defined(TARGET_MICROBLAZE)
3481 struct target_sigcontext {
3482 struct target_pt_regs regs; /* needs to be first */
3486 struct target_stack_t {
3489 unsigned int ss_size;
3492 struct target_ucontext {
3493 abi_ulong tuc_flags;
3495 struct target_stack_t tuc_stack;
3496 struct target_sigcontext tuc_mcontext;
3497 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3500 /* Signal frames. */
3501 struct target_signal_frame {
3502 struct target_ucontext uc;
3503 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3507 struct rt_signal_frame {
3513 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3515 __put_user(env->regs[0], &sc->regs.r0);
3516 __put_user(env->regs[1], &sc->regs.r1);
3517 __put_user(env->regs[2], &sc->regs.r2);
3518 __put_user(env->regs[3], &sc->regs.r3);
3519 __put_user(env->regs[4], &sc->regs.r4);
3520 __put_user(env->regs[5], &sc->regs.r5);
3521 __put_user(env->regs[6], &sc->regs.r6);
3522 __put_user(env->regs[7], &sc->regs.r7);
3523 __put_user(env->regs[8], &sc->regs.r8);
3524 __put_user(env->regs[9], &sc->regs.r9);
3525 __put_user(env->regs[10], &sc->regs.r10);
3526 __put_user(env->regs[11], &sc->regs.r11);
3527 __put_user(env->regs[12], &sc->regs.r12);
3528 __put_user(env->regs[13], &sc->regs.r13);
3529 __put_user(env->regs[14], &sc->regs.r14);
3530 __put_user(env->regs[15], &sc->regs.r15);
3531 __put_user(env->regs[16], &sc->regs.r16);
3532 __put_user(env->regs[17], &sc->regs.r17);
3533 __put_user(env->regs[18], &sc->regs.r18);
3534 __put_user(env->regs[19], &sc->regs.r19);
3535 __put_user(env->regs[20], &sc->regs.r20);
3536 __put_user(env->regs[21], &sc->regs.r21);
3537 __put_user(env->regs[22], &sc->regs.r22);
3538 __put_user(env->regs[23], &sc->regs.r23);
3539 __put_user(env->regs[24], &sc->regs.r24);
3540 __put_user(env->regs[25], &sc->regs.r25);
3541 __put_user(env->regs[26], &sc->regs.r26);
3542 __put_user(env->regs[27], &sc->regs.r27);
3543 __put_user(env->regs[28], &sc->regs.r28);
3544 __put_user(env->regs[29], &sc->regs.r29);
3545 __put_user(env->regs[30], &sc->regs.r30);
3546 __put_user(env->regs[31], &sc->regs.r31);
3547 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3550 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3552 __get_user(env->regs[0], &sc->regs.r0);
3553 __get_user(env->regs[1], &sc->regs.r1);
3554 __get_user(env->regs[2], &sc->regs.r2);
3555 __get_user(env->regs[3], &sc->regs.r3);
3556 __get_user(env->regs[4], &sc->regs.r4);
3557 __get_user(env->regs[5], &sc->regs.r5);
3558 __get_user(env->regs[6], &sc->regs.r6);
3559 __get_user(env->regs[7], &sc->regs.r7);
3560 __get_user(env->regs[8], &sc->regs.r8);
3561 __get_user(env->regs[9], &sc->regs.r9);
3562 __get_user(env->regs[10], &sc->regs.r10);
3563 __get_user(env->regs[11], &sc->regs.r11);
3564 __get_user(env->regs[12], &sc->regs.r12);
3565 __get_user(env->regs[13], &sc->regs.r13);
3566 __get_user(env->regs[14], &sc->regs.r14);
3567 __get_user(env->regs[15], &sc->regs.r15);
3568 __get_user(env->regs[16], &sc->regs.r16);
3569 __get_user(env->regs[17], &sc->regs.r17);
3570 __get_user(env->regs[18], &sc->regs.r18);
3571 __get_user(env->regs[19], &sc->regs.r19);
3572 __get_user(env->regs[20], &sc->regs.r20);
3573 __get_user(env->regs[21], &sc->regs.r21);
3574 __get_user(env->regs[22], &sc->regs.r22);
3575 __get_user(env->regs[23], &sc->regs.r23);
3576 __get_user(env->regs[24], &sc->regs.r24);
3577 __get_user(env->regs[25], &sc->regs.r25);
3578 __get_user(env->regs[26], &sc->regs.r26);
3579 __get_user(env->regs[27], &sc->regs.r27);
3580 __get_user(env->regs[28], &sc->regs.r28);
3581 __get_user(env->regs[29], &sc->regs.r29);
3582 __get_user(env->regs[30], &sc->regs.r30);
3583 __get_user(env->regs[31], &sc->regs.r31);
3584 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3587 static abi_ulong get_sigframe(struct target_sigaction *ka,
3588 CPUMBState *env, int frame_size)
3590 abi_ulong sp = env->regs[1];
3592 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3593 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3596 return ((sp - frame_size) & -8UL);
3599 static void setup_frame(int sig, struct target_sigaction *ka,
3600 target_sigset_t *set, CPUMBState *env)
3602 struct target_signal_frame *frame;
3603 abi_ulong frame_addr;
3606 frame_addr = get_sigframe(ka, env, sizeof *frame);
3607 trace_user_setup_frame(env, frame_addr);
3608 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3611 /* Save the mask. */
3612 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3614 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3615 __put_user(set->sig[i], &frame->extramask[i - 1]);
3618 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3620 /* Set up to return from userspace. If provided, use a stub
3621 already in userspace. */
3622 /* The minus-8 offset caters for the "rtsd r15,8" return offset */
3623 if (ka->sa_flags & TARGET_SA_RESTORER) {
3624 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3627 /* Note, these encodings are _big endian_! */
3628 /* addi r12, r0, __NR_sigreturn */
3629 t = 0x31800000UL | TARGET_NR_sigreturn;
3630 __put_user(t, frame->tramp + 0);
3633 __put_user(t, frame->tramp + 1);
3635 /* Return from the sighandler will jump to the tramp.
3636 The negative 8 offset is because the return is rtsd r15, 8 */
3637 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3641 /* Set up registers for signal handler */
3642 env->regs[1] = frame_addr;
3643 /* Signal handler args: */
3644 env->regs[5] = sig; /* Arg 0: signum */
3646 /* arg 1: sigcontext */
3647 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
3649 /* Offset of 4 to handle microblaze rtid r14, 0 */
3650 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3652 unlock_user_struct(frame, frame_addr, 1);
3655 force_sig(TARGET_SIGSEGV);
3658 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3659 target_siginfo_t *info,
3660 target_sigset_t *set, CPUMBState *env)
3662 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3665 long do_sigreturn(CPUMBState *env)
3667 struct target_signal_frame *frame;
3668 abi_ulong frame_addr;
3669 target_sigset_t target_set;
3673 frame_addr = env->regs[R_SP];
3674 trace_user_do_sigreturn(env, frame_addr);
3675 /* Make sure the guest isn't playing games. */
3676 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3679 /* Restore blocked signals */
3680 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3681 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3682 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3684 target_to_host_sigset_internal(&set, &target_set);
3687 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3688 /* We got here through a sigreturn syscall; our path back is via an
3689 rtb insn, so set up r14 for that. */
3690 env->regs[14] = env->sregs[SR_PC];
3692 unlock_user_struct(frame, frame_addr, 0);
3693 return -TARGET_QEMU_ESIGRETURN;
3695 force_sig(TARGET_SIGSEGV);
3698 long do_rt_sigreturn(CPUMBState *env)
3700 trace_user_do_rt_sigreturn(env, 0);
3701 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3702 return -TARGET_ENOSYS;
3705 #elif defined(TARGET_CRIS)
3707 struct target_sigcontext {
3708 struct target_pt_regs regs; /* needs to be first */
3710 uint32_t usp; /* usp before stacking this gunk on it */
3713 /* Signal frames. */
3714 struct target_signal_frame {
3715 struct target_sigcontext sc;
3716 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3717 uint16_t retcode[4]; /* Trampoline code. */
3720 struct rt_signal_frame {
3725 uint16_t retcode[4]; /* Trampoline code. */
3728 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3730 __put_user(env->regs[0], &sc->regs.r0);
3731 __put_user(env->regs[1], &sc->regs.r1);
3732 __put_user(env->regs[2], &sc->regs.r2);
3733 __put_user(env->regs[3], &sc->regs.r3);
3734 __put_user(env->regs[4], &sc->regs.r4);
3735 __put_user(env->regs[5], &sc->regs.r5);
3736 __put_user(env->regs[6], &sc->regs.r6);
3737 __put_user(env->regs[7], &sc->regs.r7);
3738 __put_user(env->regs[8], &sc->regs.r8);
3739 __put_user(env->regs[9], &sc->regs.r9);
3740 __put_user(env->regs[10], &sc->regs.r10);
3741 __put_user(env->regs[11], &sc->regs.r11);
3742 __put_user(env->regs[12], &sc->regs.r12);
3743 __put_user(env->regs[13], &sc->regs.r13);
3744 __put_user(env->regs[14], &sc->usp);
3745 __put_user(env->regs[15], &sc->regs.acr);
3746 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3747 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3748 __put_user(env->pc, &sc->regs.erp);
3751 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3753 __get_user(env->regs[0], &sc->regs.r0);
3754 __get_user(env->regs[1], &sc->regs.r1);
3755 __get_user(env->regs[2], &sc->regs.r2);
3756 __get_user(env->regs[3], &sc->regs.r3);
3757 __get_user(env->regs[4], &sc->regs.r4);
3758 __get_user(env->regs[5], &sc->regs.r5);
3759 __get_user(env->regs[6], &sc->regs.r6);
3760 __get_user(env->regs[7], &sc->regs.r7);
3761 __get_user(env->regs[8], &sc->regs.r8);
3762 __get_user(env->regs[9], &sc->regs.r9);
3763 __get_user(env->regs[10], &sc->regs.r10);
3764 __get_user(env->regs[11], &sc->regs.r11);
3765 __get_user(env->regs[12], &sc->regs.r12);
3766 __get_user(env->regs[13], &sc->regs.r13);
3767 __get_user(env->regs[14], &sc->usp);
3768 __get_user(env->regs[15], &sc->regs.acr);
3769 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3770 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3771 __get_user(env->pc, &sc->regs.erp);
3774 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3777 /* Align the stack downwards to 4. */
3778 sp = (env->regs[R_SP] & ~3);
3779 return sp - framesize;
3782 static void setup_frame(int sig, struct target_sigaction *ka,
3783 target_sigset_t *set, CPUCRISState *env)
3785 struct target_signal_frame *frame;
3786 abi_ulong frame_addr;
3789 frame_addr = get_sigframe(env, sizeof *frame);
3790 trace_user_setup_frame(env, frame_addr);
3791 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3795 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
3796 * use this trampoline anymore, but it sets it up for GDB.
3797 * In QEMU, using the trampoline simplifies things a bit, so we use it.
3799 * This is movu.w __NR_sigreturn, r9; break 13;
3801 __put_user(0x9c5f, frame->retcode+0);
3802 __put_user(TARGET_NR_sigreturn,
3803 frame->retcode + 1);
3804 __put_user(0xe93d, frame->retcode + 2);
3806 /* Save the mask. */
3807 __put_user(set->sig[0], &frame->sc.oldmask);
3809 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3810 __put_user(set->sig[i], &frame->extramask[i - 1]);
3813 setup_sigcontext(&frame->sc, env);
3815 /* Move the stack and set up the arguments for the handler. */
3816 env->regs[R_SP] = frame_addr;
3817 env->regs[10] = sig;
3818 env->pc = (unsigned long) ka->_sa_handler;
3819 /* Link SRP so the guest returns through the trampoline. */
3820 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
3822 unlock_user_struct(frame, frame_addr, 1);
3825 force_sig(TARGET_SIGSEGV);
3828 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3829 target_siginfo_t *info,
3830 target_sigset_t *set, CPUCRISState *env)
3832 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
3835 long do_sigreturn(CPUCRISState *env)
3837 struct target_signal_frame *frame;
3838 abi_ulong frame_addr;
3839 target_sigset_t target_set;
3843 frame_addr = env->regs[R_SP];
3844 trace_user_do_sigreturn(env, frame_addr);
3845 /* Make sure the guest isn't playing games. */
3846 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
3850 /* Restore blocked signals */
3851 __get_user(target_set.sig[0], &frame->sc.oldmask);
3852 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3853 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3855 target_to_host_sigset_internal(&set, &target_set);
3858 restore_sigcontext(&frame->sc, env);
3859 unlock_user_struct(frame, frame_addr, 0);
3860 return -TARGET_QEMU_ESIGRETURN;
3862 force_sig(TARGET_SIGSEGV);
3865 long do_rt_sigreturn(CPUCRISState *env)
3867 trace_user_do_rt_sigreturn(env, 0);
3868 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
3869 return -TARGET_ENOSYS;
3872 #elif defined(TARGET_OPENRISC)
3874 struct target_sigcontext {
3875 struct target_pt_regs regs;
3880 struct target_ucontext {
3881 abi_ulong tuc_flags;
3883 target_stack_t tuc_stack;
3884 struct target_sigcontext tuc_mcontext;
3885 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3888 struct target_rt_sigframe {
3891 struct target_siginfo info;
3892 struct target_sigcontext sc;
3893 struct target_ucontext uc;
3894 unsigned char retcode[16]; /* trampoline code */
3897 /* This is the asm-generic/ucontext.h version */
3899 static int restore_sigcontext(CPUOpenRISCState *regs,
3900 struct target_sigcontext *sc)
3902 unsigned int err = 0;
3903 unsigned long old_usp;
3905 /* Always make any pending restarted system call return -EINTR */
3906 current_thread_info()->restart_block.fn = do_no_restart_syscall;
3908 /* restore the regs from &sc->regs (same as sc, since regs is first)
3909 * (sc is already checked for VERIFY_READ since the sigframe was
3910 * checked in sys_sigreturn previously)
3913 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
3917 /* make sure the U-flag is set so user-mode cannot fool us */
3921 /* restore the old USP as it was before we stacked the sc etc.
3922 * (we cannot just pop the sigcontext since we aligned the sp and
3923 * stuff after pushing it)
3926 __get_user(old_usp, &sc->usp);
3927 phx_signal("old_usp 0x%lx", old_usp);
3929 __PHX__ REALLY /* ??? */
3931 regs->gpr[1] = old_usp;
3933 /* TODO: the other ports use regs->orig_XX to disable syscall checks
3934 * after this completes, but we don't use that mechanism. maybe we can
3945 /* Set up a signal frame. */
3947 static void setup_sigcontext(struct target_sigcontext *sc,
3948 CPUOpenRISCState *regs,
3951 unsigned long usp = regs->gpr[1];
3953 /* Copy the regs; they are first in sc so we can use sc directly */
3955 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3957 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
3958 the signal handler. The frametype will be restored to its previous
3959 value in restore_sigcontext. */
3960 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3962 /* then some other stuff */
3963 __put_user(mask, &sc->oldmask);
3964 __put_user(usp, &sc->usp);
3967 static inline unsigned long align_sigframe(unsigned long sp)
3972 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3973 CPUOpenRISCState *regs,
3976 unsigned long sp = regs->gpr[1];
3977 int onsigstack = on_sig_stack(sp);
3980 /* This is the X/Open sanctioned signal stack switching. */
3981 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3982 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3985 sp = align_sigframe(sp - frame_size);
3988 * If we are on the alternate signal stack and would overflow it, don't.
3989 * Return an always-bogus address instead so we will die with SIGSEGV.
3992 if (onsigstack && !likely(on_sig_stack(sp))) {
3999 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4000 target_siginfo_t *info,
4001 target_sigset_t *set, CPUOpenRISCState *env)
4004 abi_ulong frame_addr;
4005 unsigned long return_ip;
4006 struct target_rt_sigframe *frame;
4007 abi_ulong info_addr, uc_addr;
4009 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4010 trace_user_setup_rt_frame(env, frame_addr);
4011 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4015 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4016 __put_user(info_addr, &frame->pinfo);
4017 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4018 __put_user(uc_addr, &frame->puc);
4020 if (ka->sa_flags & SA_SIGINFO) {
4021 tswap_siginfo(&frame->info, info);
4024 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
4025 __put_user(0, &frame->uc.tuc_flags);
4026 __put_user(0, &frame->uc.tuc_link);
4027 __put_user(target_sigaltstack_used.ss_sp,
4028 &frame->uc.tuc_stack.ss_sp);
4029 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
4030 __put_user(target_sigaltstack_used.ss_size,
4031 &frame->uc.tuc_stack.ss_size);
4032 setup_sigcontext(&frame->sc, env, set->sig[0]);
4034 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4036 /* trampoline - the desired return ip is the retcode itself */
4037 return_ip = (unsigned long)&frame->retcode;
4038 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
4039 __put_user(0xa960, (short *)(frame->retcode + 0));
4040 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4041 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4042 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
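    /* The halfwords/words stored above assemble (big-endian) to
     * l.ori r11, r0, __NR_rt_sigreturn; l.sys 1; l.nop -- the trailing
     * l.nop (0x15000000) just pads the trampoline.
     */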
4048 /* TODO: what is the current->exec_domain stuff and invmap? */
4050 /* Set up registers for signal handler */
4051 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4052 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
4053 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
4054 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */
4055 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */
4057 /* actually move the usp to reflect the stacked frame */
4058 env->gpr[1] = (unsigned long)frame;
4063 unlock_user_struct(frame, frame_addr, 1);
4064 if (sig == TARGET_SIGSEGV) {
4065 ka->_sa_handler = TARGET_SIG_DFL;
4067 force_sig(TARGET_SIGSEGV);
4070 long do_sigreturn(CPUOpenRISCState *env)
4072 trace_user_do_sigreturn(env, 0);
4073 fprintf(stderr, "do_sigreturn: not implemented\n");
4074 return -TARGET_ENOSYS;
4077 long do_rt_sigreturn(CPUOpenRISCState *env)
4079 trace_user_do_rt_sigreturn(env, 0);
4080 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4081 return -TARGET_ENOSYS;
4083 /* TARGET_OPENRISC */
4085 #elif defined(TARGET_S390X)
4087 #define __NUM_GPRS 16
4088 #define __NUM_FPRS 16
4089 #define __NUM_ACRS 16
4091 #define S390_SYSCALL_SIZE 2
4092 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4094 #define _SIGCONTEXT_NSIG 64
4095 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4096 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4097 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4098 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4099 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4103 target_ulong gprs[__NUM_GPRS];
4104 unsigned int acrs[__NUM_ACRS];
4105 } target_s390_regs_common;
4109 double fprs[__NUM_FPRS];
4110 } target_s390_fp_regs;
4113 target_s390_regs_common regs;
4114 target_s390_fp_regs fpregs;
4117 struct target_sigcontext {
4118 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4119 target_sigregs *sregs;
4123 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4124 struct target_sigcontext sc;
4125 target_sigregs sregs;
4127 uint8_t retcode[S390_SYSCALL_SIZE];
4130 struct target_ucontext {
4131 target_ulong tuc_flags;
4132 struct target_ucontext *tuc_link;
4133 target_stack_t tuc_stack;
4134 target_sigregs tuc_mcontext;
4135 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4139 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4140 uint8_t retcode[S390_SYSCALL_SIZE];
4141 struct target_siginfo info;
4142 struct target_ucontext uc;
4145 static inline abi_ulong
4146 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4150 /* Default to using normal stack */
4153 /* This is the X/Open sanctioned signal stack switching. */
4154 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4155 if (!sas_ss_flags(sp)) {
4156 sp = target_sigaltstack_used.ss_sp +
4157 target_sigaltstack_used.ss_size;
4161 /* This is the legacy signal stack switching. */
4162 else if (/* FIXME !user_mode(regs) */ 0 &&
4163 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4165 sp = (abi_ulong) ka->sa_restorer;
4168 return (sp - frame_size) & -8ul;
4171 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4174 //save_access_regs(current->thread.acrs); FIXME
4176 /* Copy a 'clean' PSW mask to the user to avoid leaking
4177 information about whether PER is currently on. */
4178 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4179 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4180 for (i = 0; i < 16; i++) {
4181 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4183 for (i = 0; i < 16; i++) {
4184 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4187 * We have to store the fp registers to current->thread.fp_regs
4188 * to merge them with the emulated registers.
4190 //save_fp_regs(¤t->thread.fp_regs); FIXME
4191 for (i = 0; i < 16; i++) {
4192 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4196 static void setup_frame(int sig, struct target_sigaction *ka,
4197 target_sigset_t *set, CPUS390XState *env)
4200 abi_ulong frame_addr;
4202 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4203 trace_user_setup_frame(env, frame_addr);
4204 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4208 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4210 save_sigregs(env, &frame->sregs);
4212 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4213 (abi_ulong *)&frame->sc.sregs);
4215 /* Set up to return from userspace. If provided, use a stub
4216 already in userspace. */
4217 if (ka->sa_flags & TARGET_SA_RESTORER) {
4218 env->regs[14] = (unsigned long)
4219 ka->sa_restorer | PSW_ADDR_AMODE;
4221 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4223 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4224 (uint16_t *)(frame->retcode));
4227 /* Set up backchain. */
4228 __put_user(env->regs[15], (abi_ulong *) frame);
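    /* The s390 ABI maintains a "backchain": the first word of each stack
     * frame points to the caller's frame, so storing the old SP (r15) at
     * the start of the signal frame keeps the chain unwindable.
     */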
4230 /* Set up registers for signal handler */
4231 env->regs[15] = frame_addr;
4232 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4234 env->regs[2] = sig; //map_signal(sig);
4235 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
4237 /* We forgot to include these in the sigcontext.
4238 To avoid breaking binary compatibility, they are passed as args. */
4239 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4240 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4242 /* Place signal number on stack to allow backtrace from handler. */
4243 __put_user(env->regs[2], &frame->signo);
4244 unlock_user_struct(frame, frame_addr, 1);
4248 force_sig(TARGET_SIGSEGV);
4251 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4252 target_siginfo_t *info,
4253 target_sigset_t *set, CPUS390XState *env)
4257 abi_ulong frame_addr;
4259 frame_addr = get_sigframe(ka, env, sizeof *frame);
4260 trace_user_setup_rt_frame(env, frame_addr);
4261 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4265 tswap_siginfo(&frame->info, info);
4267 /* Create the ucontext. */
4268 __put_user(0, &frame->uc.tuc_flags);
4269 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4270 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4271 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4272 &frame->uc.tuc_stack.ss_flags);
4273 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4274 save_sigregs(env, &frame->uc.tuc_mcontext);
4275 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4276 __put_user((abi_ulong)set->sig[i],
4277 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4280 /* Set up to return from userspace. If provided, use a stub
4281 already in userspace. */
4282 if (ka->sa_flags & TARGET_SA_RESTORER) {
4283 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4285 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
4286 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4287 (uint16_t *)(frame->retcode));
4290 /* Set up backchain. */
4291 __put_user(env->regs[15], (abi_ulong *) frame);
4293 /* Set up registers for signal handler */
4294 env->regs[15] = frame_addr;
4295 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4297 env->regs[2] = sig; //map_signal(sig);
4298 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4299 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4303 force_sig(TARGET_SIGSEGV);
4307 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4312 for (i = 0; i < 16; i++) {
4313 __get_user(env->regs[i], &sc->regs.gprs[i]);
4316 __get_user(env->psw.mask, &sc->regs.psw.mask);
4317 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4318 (unsigned long long)env->psw.addr);
4319 __get_user(env->psw.addr, &sc->regs.psw.addr);
4320 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4322 for (i = 0; i < 16; i++) {
4323 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4325 for (i = 0; i < 16; i++) {
4326 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4332 long do_sigreturn(CPUS390XState *env)
4335 abi_ulong frame_addr = env->regs[15];
4336 target_sigset_t target_set;
4339 trace_user_do_sigreturn(env, frame_addr);
4340 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4343 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4345 target_to_host_sigset_internal(&set, &target_set);
4346 set_sigmask(&set); /* ~_BLOCKABLE? */
4348 if (restore_sigregs(env, &frame->sregs)) {
4352 unlock_user_struct(frame, frame_addr, 0);
4353 return -TARGET_QEMU_ESIGRETURN;
4356 force_sig(TARGET_SIGSEGV);
4360 long do_rt_sigreturn(CPUS390XState *env)
4363 abi_ulong frame_addr = env->regs[15];
4366 trace_user_do_rt_sigreturn(env, frame_addr);
4367 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4370 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4372 set_sigmask(&set); /* ~_BLOCKABLE? */
4374 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4378 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4379 get_sp_from_cpustate(env)) == -EFAULT) {
4382 unlock_user_struct(frame, frame_addr, 0);
4383 return -TARGET_QEMU_ESIGRETURN;
4386 unlock_user_struct(frame, frame_addr, 0);
4387 force_sig(TARGET_SIGSEGV);
4391 #elif defined(TARGET_PPC)
4393 /* Size of dummy stack frame allocated when calling signal handler.
4394 See arch/powerpc/include/asm/ptrace.h. */
4395 #if defined(TARGET_PPC64)
4396 #define SIGNAL_FRAMESIZE 128
4398 #define SIGNAL_FRAMESIZE 64
4401 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4402 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4403 struct target_mcontext {
4404 target_ulong mc_gregs[48];
4405 /* Includes fpscr. */
4406 uint64_t mc_fregs[33];
4407 target_ulong mc_pad[2];
4408 /* We need to handle Altivec and SPE at the same time, which no
4409 kernel needs to do. Fortunately, the kernel defines this bit to
4410 be Altivec-register-large all the time, rather than trying to
4411 twiddle it based on the specific platform. */
4413 /* SPE vector registers. One extra for SPEFSCR. */
4415 /* Altivec vector registers. The packing of VSCR and VRSAVE
4416 varies depending on whether we're PPC64 or not: PPC64 splits
4417 them apart; PPC32 stuffs them together. */
4418 #if defined(TARGET_PPC64)
4419 #define QEMU_NVRREG 34
4421 #define QEMU_NVRREG 33
4423 ppc_avr_t altivec[QEMU_NVRREG];
4425 } mc_vregs __attribute__((__aligned__(16)));
4428 /* See arch/powerpc/include/asm/sigcontext.h. */
4429 struct target_sigcontext {
4430 target_ulong _unused[4];
4432 #if defined(TARGET_PPC64)
4435 target_ulong handler;
4436 target_ulong oldmask;
4437 target_ulong regs; /* struct pt_regs __user * */
4438 #if defined(TARGET_PPC64)
4439 struct target_mcontext mcontext;
4443 /* Indices for target_mcontext.mc_gregs, below.
4444 See arch/powerpc/include/asm/ptrace.h for details. */
4480 TARGET_PT_ORIG_R3 = 34,
4485 /* Yes, there are two registers with #39. One is 64-bit only. */
4487 TARGET_PT_SOFTE = 39,
4488 TARGET_PT_TRAP = 40,
4490 TARGET_PT_DSISR = 42,
4491 TARGET_PT_RESULT = 43,
4492 TARGET_PT_REGS_COUNT = 44
4496 struct target_ucontext {
4497 target_ulong tuc_flags;
4498 target_ulong tuc_link; /* struct ucontext __user * */
4499 struct target_sigaltstack tuc_stack;
4500 #if !defined(TARGET_PPC64)
4502 target_ulong tuc_regs; /* struct mcontext __user *
4503 points to uc_mcontext field */
4505 target_sigset_t tuc_sigmask;
4506 #if defined(TARGET_PPC64)
4507 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4508 struct target_sigcontext tuc_sigcontext;
4510 int32_t tuc_maskext[30];
4511 int32_t tuc_pad2[3];
4512 struct target_mcontext tuc_mcontext;
4516 /* See arch/powerpc/kernel/signal_32.c. */
4517 struct target_sigframe {
4518 struct target_sigcontext sctx;
4519 struct target_mcontext mctx;
4523 #if defined(TARGET_PPC64)
4525 #define TARGET_TRAMP_SIZE 6
4527 struct target_rt_sigframe {
4528 /* sys_rt_sigreturn requires the ucontext be the first field */
4529 struct target_ucontext uc;
4530 target_ulong _unused[2];
4531 uint32_t trampoline[TARGET_TRAMP_SIZE];
4532 target_ulong pinfo; /* struct siginfo __user * */
4533 target_ulong puc; /* void __user * */
4534 struct target_siginfo info;
4535 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
4537 } __attribute__((aligned(16)));
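/* Rough picture of the resulting 64-bit frame, following the field order
   declared above: uc sits at frame_addr itself (which is what lets
   sys_rt_sigreturn find it), followed by two unused words, the trampoline,
   the pinfo/puc pointers and the siginfo. setup_rt_frame() then drops r1 to
   frame_addr minus (SIGNAL_FRAMESIZE + 16), so do_rt_sigreturn() can recover
   the ucontext at r1 + SIGNAL_FRAMESIZE + 16. */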
4541 struct target_rt_sigframe {
4542 struct target_siginfo info;
4543 struct target_ucontext uc;
4549 #if defined(TARGET_PPC64)
4551 struct target_func_ptr {
4558 /* We use the mc_pad field for the signal return trampoline. */
4559 #define tramp mc_pad
4561 /* See arch/powerpc/kernel/signal.c. */
4562 static target_ulong get_sigframe(struct target_sigaction *ka,
4568 oldsp = env->gpr[1];
4570 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4571 (sas_ss_flags(oldsp) == 0)) {
4572 oldsp = (target_sigaltstack_used.ss_sp
4573 + target_sigaltstack_used.ss_size);
4576 return (oldsp - frame_size) & ~0xFUL;
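/* For illustration with hypothetical values: oldsp = 0x7fffe23c and
   frame_size = 0x4d0 give 0x7fffdd6c, and masking with ~0xF rounds that down
   to 0x7fffdd60, keeping the signal frame 16-byte aligned. */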
4579 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
4581 target_ulong msr = env->msr;
4583 target_ulong ccr = 0;
4585 /* In general, the kernel attempts to be intelligent about what it
4586 needs to save for Altivec/FP/SPE registers. We don't care that
4587 much, so we just go ahead and save everything. */
4589 /* Save general registers. */
4590 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4591 __put_user(env->gpr[i], &frame->mc_gregs[i]);
4593 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4594 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4595 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4596 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4598 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4599 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
4601 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
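/* The loop above packs the eight 4-bit CR fields into one 32-bit word in
   architectural order: crf[0] is shifted left by 28 into the top nibble and
   crf[7] by 0 into the bottom nibble. For example, crf[0] == 0x8 with all
   other fields clear yields ccr == 0x80000000. */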
4603 /* Save Altivec registers if necessary. */
4604 if (env->insns_flags & PPC_ALTIVEC) {
4605 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4606 ppc_avr_t *avr = &env->avr[i];
4607 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4609 __put_user(avr->u64[0], &vreg->u64[0]);
4610 __put_user(avr->u64[1], &vreg->u64[1]);
4612 /* Set MSR_VR in the saved MSR value to indicate that
4613 frame->mc_vregs contains valid data. */
4615 __put_user((uint32_t)env->spr[SPR_VRSAVE],
4616 &frame->mc_vregs.altivec[32].u32[3]);
4619 /* Save floating point registers. */
4620 if (env->insns_flags & PPC_FLOAT) {
4621 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4622 __put_user(env->fpr[i], &frame->mc_fregs[i]);
4624 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
4627 /* Save SPE registers. The kernel only saves the high half. */
4628 if (env->insns_flags & PPC_SPE) {
4629 #if defined(TARGET_PPC64)
4630 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4631 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
4634 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4635 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4638 /* Set MSR_SPE in the saved MSR value to indicate that
4639 frame->mc_vregs contains valid data. */
4641 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4645 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4648 static void encode_trampoline(int sigret, uint32_t *tramp)
4650 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
4652 __put_user(0x38000000 | sigret, &tramp[0]);
4653 __put_user(0x44000002, &tramp[1]);
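/* 0x38000000 is "addi r0,r0,0" (i.e. li r0,0, since RA is 0); OR-ing in
   sigret fills the low 16-bit immediate field, and 0x44000002 is "sc", so
   the stub executes "li r0,<sigret>; sc" and re-enters the kernel through
   the requested sigreturn syscall. */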
4657 static void restore_user_regs(CPUPPCState *env,
4658 struct target_mcontext *frame, int sig)
4660 target_ulong save_r2 = 0;
4667 save_r2 = env->gpr[2];
4670 /* Restore general registers. */
4671 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4672 __get_user(env->gpr[i], &frame->mc_gregs[i]);
4674 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4675 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4676 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4677 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4678 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4680 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4681 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
4685 env->gpr[2] = save_r2;
4688 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4690 /* If doing signal return, restore the previous little-endian mode. */
4692 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
4694 /* Restore Altivec registers if necessary. */
4695 if (env->insns_flags & PPC_ALTIVEC) {
4696 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4697 ppc_avr_t *avr = &env->avr[i];
4698 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4700 __get_user(avr->u64[0], &vreg->u64[0]);
4701 __get_user(avr->u64[1], &vreg->u64[1]);
4703 /* Restore VRSAVE, which save_user_regs() stashed in the last
4704 altivec slot of mc_vregs. */
4705 __get_user(env->spr[SPR_VRSAVE],
4706 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
4709 /* Restore floating point registers. */
4710 if (env->insns_flags & PPC_FLOAT) {
4712 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4713 __get_user(env->fpr[i], &frame->mc_fregs[i]);
4715 __get_user(fpscr, &frame->mc_fregs[32]);
4716 env->fpscr = (uint32_t) fpscr;
4719 /* Restore SPE registers. The kernel only saved the high half. */
4720 if (env->insns_flags & PPC_SPE) {
4721 #if defined(TARGET_PPC64)
4722 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4725 __get_user(hi, &frame->mc_vregs.spe[i]);
4726 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
4729 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4730 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4733 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4737 static void setup_frame(int sig, struct target_sigaction *ka,
4738 target_sigset_t *set, CPUPPCState *env)
4740 struct target_sigframe *frame;
4741 struct target_sigcontext *sc;
4742 target_ulong frame_addr, newsp;
4744 #if defined(TARGET_PPC64)
4745 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4748 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4749 trace_user_setup_frame(env, frame_addr);
4750 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
4754 __put_user(ka->_sa_handler, &sc->handler);
4755 __put_user(set->sig[0], &sc->oldmask);
4756 #if TARGET_ABI_BITS == 64
4757 __put_user(set->sig[0] >> 32, &sc->_unused[3]);
4759 __put_user(set->sig[1], &sc->_unused[3]);
4761 __put_user(h2g(&frame->mctx), &sc->regs);
4762 __put_user(sig, &sc->signal);
4764 /* Save user regs. */
4765 save_user_regs(env, &frame->mctx);
4767 /* Construct the trampoline code on the stack. */
4768 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
4770 /* The kernel checks for the presence of a VDSO here. We don't
4771 emulate a vdso, so use a sigreturn system call. */
4772 env->lr = (target_ulong) h2g(frame->mctx.tramp);
4774 /* Turn off all fp exceptions. */
4777 /* Create a stack frame for the caller of the handler. */
4778 newsp = frame_addr - SIGNAL_FRAMESIZE;
4779 err |= put_user(env->gpr[1], newsp, target_ulong);
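/* After this store the guest stack looks roughly like: newsp holds the
   caller's r1 as the back chain, the SIGNAL_FRAMESIZE bytes above it form
   the dummy frame the handler may use, and the signal frame proper starts at
   frame_addr = newsp + SIGNAL_FRAMESIZE. */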
4784 /* Set up registers for signal handler. */
4785 env->gpr[1] = newsp;
4787 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
4789 #if defined(TARGET_PPC64)
4790 if (get_ppc64_abi(image) < 2) {
4791 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4792 struct target_func_ptr *handler =
4793 (struct target_func_ptr *)g2h(ka->_sa_handler);
4794 env->nip = tswapl(handler->entry);
4795 env->gpr[2] = tswapl(handler->toc);
4797 /* ELFv2 PPC64 function pointers are entry points, but R12
4798 * must also be set */
4799 env->nip = tswapl((target_ulong) ka->_sa_handler);
4800 env->gpr[12] = env->nip;
4803 env->nip = (target_ulong) ka->_sa_handler;
4806 /* Signal handlers are entered in big-endian mode. */
4807 env->msr &= ~(1ull << MSR_LE);
4809 unlock_user_struct(frame, frame_addr, 1);
4813 unlock_user_struct(frame, frame_addr, 1);
4814 force_sig(TARGET_SIGSEGV);
4817 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4818 target_siginfo_t *info,
4819 target_sigset_t *set, CPUPPCState *env)
4821 struct target_rt_sigframe *rt_sf;
4822 uint32_t *trampptr = 0;
4823 struct target_mcontext *mctx = 0;
4824 target_ulong rt_sf_addr, newsp = 0;
4826 #if defined(TARGET_PPC64)
4827 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4830 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
4831 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
4834 tswap_siginfo(&rt_sf->info, info);
4836 __put_user(0, &rt_sf->uc.tuc_flags);
4837 __put_user(0, &rt_sf->uc.tuc_link);
4838 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
4839 &rt_sf->uc.tuc_stack.ss_sp);
4840 __put_user(sas_ss_flags(env->gpr[1]),
4841 &rt_sf->uc.tuc_stack.ss_flags);
4842 __put_user(target_sigaltstack_used.ss_size,
4843 &rt_sf->uc.tuc_stack.ss_size);
4844 #if !defined(TARGET_PPC64)
4845 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
4846 &rt_sf->uc.tuc_regs);
4848 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4849 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
4852 #if defined(TARGET_PPC64)
4853 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
4854 trampptr = &rt_sf->trampoline[0];
4856 mctx = &rt_sf->uc.tuc_mcontext;
4857 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
4860 save_user_regs(env, mctx);
4861 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
4863 /* The kernel checks for the presence of a VDSO here. We don't
4864 emulate a vdso, so use a sigreturn system call. */
4865 env->lr = (target_ulong) h2g(trampptr);
4867 /* Turn off all fp exceptions. */
4870 /* Create a stack frame for the caller of the handler. */
4871 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
4872 err |= put_user(env->gpr[1], newsp, target_ulong);
4877 /* Set up registers for signal handler. */
4878 env->gpr[1] = newsp;
4879 env->gpr[3] = (target_ulong) sig;
4880 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
4881 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
4882 env->gpr[6] = (target_ulong) h2g(rt_sf);
4884 #if defined(TARGET_PPC64)
4885 if (get_ppc64_abi(image) < 2) {
4886 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4887 struct target_func_ptr *handler =
4888 (struct target_func_ptr *)g2h(ka->_sa_handler);
4889 env->nip = tswapl(handler->entry);
4890 env->gpr[2] = tswapl(handler->toc);
4892 /* ELFv2 PPC64 function pointers are entry points, but R12
4893 * must also be set */
4894 env->nip = tswapl((target_ulong) ka->_sa_handler);
4895 env->gpr[12] = env->nip;
4898 env->nip = (target_ulong) ka->_sa_handler;
4901 /* Signal handlers are entered in big-endian mode. */
4902 env->msr &= ~(1ull << MSR_LE);
4904 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4908 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4909 force_sig(TARGET_SIGSEGV);
4913 long do_sigreturn(CPUPPCState *env)
4915 struct target_sigcontext *sc = NULL;
4916 struct target_mcontext *sr = NULL;
4917 target_ulong sr_addr = 0, sc_addr;
4919 target_sigset_t set;
4921 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
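/* setup_frame() left r1 at frame_addr - SIGNAL_FRAMESIZE with sctx at the
   start of the frame, so the sigcontext is found exactly SIGNAL_FRAMESIZE
   above the current stack pointer. */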
4922 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4925 #if defined(TARGET_PPC64)
4926 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4928 __get_user(set.sig[0], &sc->oldmask);
4929 __get_user(set.sig[1], &sc->_unused[3]);
4931 target_to_host_sigset_internal(&blocked, &set);
4932 set_sigmask(&blocked);
4934 __get_user(sr_addr, &sc->regs);
4935 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4937 restore_user_regs(env, sr, 1);
4939 unlock_user_struct(sr, sr_addr, 1);
4940 unlock_user_struct(sc, sc_addr, 1);
4941 return -TARGET_QEMU_ESIGRETURN;
4944 unlock_user_struct(sr, sr_addr, 1);
4945 unlock_user_struct(sc, sc_addr, 1);
4946 force_sig(TARGET_SIGSEGV);
4950 /* See arch/powerpc/kernel/signal_32.c. */
4951 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
4953 struct target_mcontext *mcp;
4954 target_ulong mcp_addr;
4956 target_sigset_t set;
4958 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
4962 #if defined(TARGET_PPC64)
4963 mcp_addr = h2g(ucp) +
4964 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
4966 __get_user(mcp_addr, &ucp->tuc_regs);
4969 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
4972 target_to_host_sigset_internal(&blocked, &set);
4973 set_sigmask(&blocked);
4974 restore_user_regs(env, mcp, sig);
4976 unlock_user_struct(mcp, mcp_addr, 1);
4980 long do_rt_sigreturn(CPUPPCState *env)
4982 struct target_rt_sigframe *rt_sf = NULL;
4983 target_ulong rt_sf_addr;
4985 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
4986 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
4989 if (do_setcontext(&rt_sf->uc, env, 1))
4992 do_sigaltstack(rt_sf_addr
4993 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
4996 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4997 return -TARGET_QEMU_ESIGRETURN;
5000 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5001 force_sig(TARGET_SIGSEGV);
5005 #elif defined(TARGET_M68K)
5007 struct target_sigcontext {
5014 unsigned short sc_sr;
5018 struct target_sigframe
5025 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5026 struct target_sigcontext sc;
5029 typedef int target_greg_t;
5030 #define TARGET_NGREG 18
5031 typedef target_greg_t target_gregset_t[TARGET_NGREG];
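/* Layout used by target_rt_setup_ucontext() below: gregs[0..7] hold d0-d7,
   gregs[8..15] hold a0-a7, gregs[16] the pc and gregs[17] the sr. */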
5033 typedef struct target_fpregset {
5036 } target_fpregset_t;
5038 struct target_mcontext {
5040 target_gregset_t gregs;
5041 target_fpregset_t fpregs;
5044 #define TARGET_MCONTEXT_VERSION 2
5046 struct target_ucontext {
5047 abi_ulong tuc_flags;
5049 target_stack_t tuc_stack;
5050 struct target_mcontext tuc_mcontext;
5051 abi_long tuc_filler[80];
5052 target_sigset_t tuc_sigmask;
5055 struct target_rt_sigframe
5062 struct target_siginfo info;
5063 struct target_ucontext uc;
5066 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5069 __put_user(mask, &sc->sc_mask);
5070 __put_user(env->aregs[7], &sc->sc_usp);
5071 __put_user(env->dregs[0], &sc->sc_d0);
5072 __put_user(env->dregs[1], &sc->sc_d1);
5073 __put_user(env->aregs[0], &sc->sc_a0);
5074 __put_user(env->aregs[1], &sc->sc_a1);
5075 __put_user(env->sr, &sc->sc_sr);
5076 __put_user(env->pc, &sc->sc_pc);
5080 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5084 __get_user(env->aregs[7], &sc->sc_usp);
5085 __get_user(env->dregs[0], &sc->sc_d0);
5086 __get_user(env->dregs[1], &sc->sc_d1);
5087 __get_user(env->aregs[0], &sc->sc_a0);
5088 __get_user(env->aregs[1], &sc->sc_a1);
5089 __get_user(env->pc, &sc->sc_pc);
5090 __get_user(temp, &sc->sc_sr);
5091 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5095 * Determine which stack to use.
5097 static inline abi_ulong
5098 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5103 sp = regs->aregs[7];
5105 /* This is the X/Open sanctioned signal stack switching. */
5106 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5107 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5110 return ((sp - frame_size) & -8UL);
5113 static void setup_frame(int sig, struct target_sigaction *ka,
5114 target_sigset_t *set, CPUM68KState *env)
5116 struct target_sigframe *frame;
5117 abi_ulong frame_addr;
5118 abi_ulong retcode_addr;
5122 frame_addr = get_sigframe(ka, env, sizeof *frame);
5123 trace_user_setup_frame(env, frame_addr);
5124 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5128 __put_user(sig, &frame->sig);
5130 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5131 __put_user(sc_addr, &frame->psc);
5133 setup_sigcontext(&frame->sc, env, set->sig[0]);
5135 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5136 __put_user(set->sig[i], &frame->extramask[i - 1]);
5139 /* Set up to return from userspace. */
5141 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5142 __put_user(retcode_addr, &frame->pretcode);
5144 /* moveq #,d0; trap #0 */
5146 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5147 (uint32_t *)(frame->retcode));
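/* The 32-bit store lays down two big-endian words: the high word 0x70NN is
   "moveq #NN,d0" with NN = TARGET_NR_sigreturn (which fits in moveq's signed
   8-bit immediate), and the low word 0x4e40 is "trap #0", so the handler
   returns through the sigreturn syscall. */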
5149 /* Set up registers for the signal handler. */
5151 env->aregs[7] = frame_addr;
5152 env->pc = ka->_sa_handler;
5154 unlock_user_struct(frame, frame_addr, 1);
5158 force_sig(TARGET_SIGSEGV);
5161 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5164 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5166 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5167 __put_user(env->dregs[0], &gregs[0]);
5168 __put_user(env->dregs[1], &gregs[1]);
5169 __put_user(env->dregs[2], &gregs[2]);
5170 __put_user(env->dregs[3], &gregs[3]);
5171 __put_user(env->dregs[4], &gregs[4]);
5172 __put_user(env->dregs[5], &gregs[5]);
5173 __put_user(env->dregs[6], &gregs[6]);
5174 __put_user(env->dregs[7], &gregs[7]);
5175 __put_user(env->aregs[0], &gregs[8]);
5176 __put_user(env->aregs[1], &gregs[9]);
5177 __put_user(env->aregs[2], &gregs[10]);
5178 __put_user(env->aregs[3], &gregs[11]);
5179 __put_user(env->aregs[4], &gregs[12]);
5180 __put_user(env->aregs[5], &gregs[13]);
5181 __put_user(env->aregs[6], &gregs[14]);
5182 __put_user(env->aregs[7], &gregs[15]);
5183 __put_user(env->pc, &gregs[16]);
5184 __put_user(env->sr, &gregs[17]);
5189 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5190 struct target_ucontext *uc)
5193 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5195 __get_user(temp, &uc->tuc_mcontext.version);
5196 if (temp != TARGET_MCONTEXT_VERSION)
5199 /* restore passed registers */
5200 __get_user(env->dregs[0], &gregs[0]);
5201 __get_user(env->dregs[1], &gregs[1]);
5202 __get_user(env->dregs[2], &gregs[2]);
5203 __get_user(env->dregs[3], &gregs[3]);
5204 __get_user(env->dregs[4], &gregs[4]);
5205 __get_user(env->dregs[5], &gregs[5]);
5206 __get_user(env->dregs[6], &gregs[6]);
5207 __get_user(env->dregs[7], &gregs[7]);
5208 __get_user(env->aregs[0], &gregs[8]);
5209 __get_user(env->aregs[1], &gregs[9]);
5210 __get_user(env->aregs[2], &gregs[10]);
5211 __get_user(env->aregs[3], &gregs[11]);
5212 __get_user(env->aregs[4], &gregs[12]);
5213 __get_user(env->aregs[5], &gregs[13]);
5214 __get_user(env->aregs[6], &gregs[14]);
5215 __get_user(env->aregs[7], &gregs[15]);
5216 __get_user(env->pc, &gregs[16]);
5217 __get_user(temp, &gregs[17]);
5218 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5226 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5227 target_siginfo_t *info,
5228 target_sigset_t *set, CPUM68KState *env)
5230 struct target_rt_sigframe *frame;
5231 abi_ulong frame_addr;
5232 abi_ulong retcode_addr;
5233 abi_ulong info_addr;
5238 frame_addr = get_sigframe(ka, env, sizeof *frame);
5239 trace_user_setup_rt_frame(env, frame_addr);
5240 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5244 __put_user(sig, &frame->sig);
5246 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5247 __put_user(info_addr, &frame->pinfo);
5249 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5250 __put_user(uc_addr, &frame->puc);
5252 tswap_siginfo(&frame->info, info);
5254 /* Create the ucontext */
5256 __put_user(0, &frame->uc.tuc_flags);
5257 __put_user(0, &frame->uc.tuc_link);
5258 __put_user(target_sigaltstack_used.ss_sp,
5259 &frame->uc.tuc_stack.ss_sp);
5260 __put_user(sas_ss_flags(env->aregs[7]),
5261 &frame->uc.tuc_stack.ss_flags);
5262 __put_user(target_sigaltstack_used.ss_size,
5263 &frame->uc.tuc_stack.ss_size);
5264 err |= target_rt_setup_ucontext(&frame->uc, env);
5269 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5270 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5273 /* Set up to return from userspace. */
5275 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5276 __put_user(retcode_addr, &frame->pretcode);
5278 /* moveq #,d0; notb d0; trap #0 */
5280 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5281 (uint32_t *)(frame->retcode + 0));
5282 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
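/* The rt_sigreturn number does not fit in moveq's signed 8-bit immediate, so
   the stub loads its complement ("moveq #(NR^0xff),d0"), inverts it with
   "not.b d0" (the 0x4600 low word of the first store) and then executes
   "trap #0" (the 0x4e40 written just above). */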
5287 /* Set up registers for the signal handler. */
5289 env->aregs[7] = frame_addr;
5290 env->pc = ka->_sa_handler;
5292 unlock_user_struct(frame, frame_addr, 1);
5296 unlock_user_struct(frame, frame_addr, 1);
5297 force_sig(TARGET_SIGSEGV);
5300 long do_sigreturn(CPUM68KState *env)
5302 struct target_sigframe *frame;
5303 abi_ulong frame_addr = env->aregs[7] - 4;
5304 target_sigset_t target_set;
5308 trace_user_do_sigreturn(env, frame_addr);
5309 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5312 /* set blocked signals */
5314 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5316 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5317 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5320 target_to_host_sigset_internal(&set, &target_set);
5323 /* restore registers */
5325 restore_sigcontext(env, &frame->sc);
5327 unlock_user_struct(frame, frame_addr, 0);
5328 return -TARGET_QEMU_ESIGRETURN;
5331 force_sig(TARGET_SIGSEGV);
5335 long do_rt_sigreturn(CPUM68KState *env)
5337 struct target_rt_sigframe *frame;
5338 abi_ulong frame_addr = env->aregs[7] - 4;
5339 target_sigset_t target_set;
5342 trace_user_do_rt_sigreturn(env, frame_addr);
5343 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5346 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5349 /* restore registers */
5351 if (target_rt_restore_ucontext(env, &frame->uc))
5354 if (do_sigaltstack(frame_addr +
5355 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5356 0, get_sp_from_cpustate(env)) == -EFAULT)
5359 unlock_user_struct(frame, frame_addr, 0);
5360 return -TARGET_QEMU_ESIGRETURN;
5363 unlock_user_struct(frame, frame_addr, 0);
5364 force_sig(TARGET_SIGSEGV);
5368 #elif defined(TARGET_ALPHA)
5370 struct target_sigcontext {
5371 abi_long sc_onstack;
5375 abi_long sc_regs[32];
5376 abi_long sc_ownedfp;
5377 abi_long sc_fpregs[32];
5379 abi_ulong sc_fp_control;
5380 abi_ulong sc_reserved1;
5381 abi_ulong sc_reserved2;
5384 abi_ulong sc_traparg_a0;
5385 abi_ulong sc_traparg_a1;
5386 abi_ulong sc_traparg_a2;
5387 abi_ulong sc_fp_trap_pc;
5388 abi_ulong sc_fp_trigger_sum;
5389 abi_ulong sc_fp_trigger_inst;
5392 struct target_ucontext {
5393 abi_ulong tuc_flags;
5395 abi_ulong tuc_osf_sigmask;
5396 target_stack_t tuc_stack;
5397 struct target_sigcontext tuc_mcontext;
5398 target_sigset_t tuc_sigmask;
5401 struct target_sigframe {
5402 struct target_sigcontext sc;
5403 unsigned int retcode[3];
5406 struct target_rt_sigframe {
5407 target_siginfo_t info;
5408 struct target_ucontext uc;
5409 unsigned int retcode[3];
5412 #define INSN_MOV_R30_R16 0x47fe0410
5413 #define INSN_LDI_R0 0x201f0000
5414 #define INSN_CALLSYS 0x00000083
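/* Rough meaning of the stub, based on the encodings above: INSN_MOV_R30_R16
   copies the stack pointer (which points at the signal frame) into a0, the
   first syscall argument; INSN_LDI_R0 is "lda $0,0($31)" whose low 16 bits
   are the displacement, so adding the syscall number below yields
   "lda $0,<NR>($31)" and loads the number into v0; INSN_CALLSYS then traps
   into the kernel. */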
5416 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5417 abi_ulong frame_addr, target_sigset_t *set)
5421 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5422 __put_user(set->sig[0], &sc->sc_mask);
5423 __put_user(env->pc, &sc->sc_pc);
5424 __put_user(8, &sc->sc_ps);
5426 for (i = 0; i < 31; ++i) {
5427 __put_user(env->ir[i], &sc->sc_regs[i]);
5429 __put_user(0, &sc->sc_regs[31]);
5431 for (i = 0; i < 31; ++i) {
5432 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5434 __put_user(0, &sc->sc_fpregs[31]);
5435 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5437 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5438 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5439 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5442 static void restore_sigcontext(CPUAlphaState *env,
5443 struct target_sigcontext *sc)
5448 __get_user(env->pc, &sc->sc_pc);
5450 for (i = 0; i < 31; ++i) {
5451 __get_user(env->ir[i], &sc->sc_regs[i]);
5453 for (i = 0; i < 31; ++i) {
5454 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5457 __get_user(fpcr, &sc->sc_fpcr);
5458 cpu_alpha_store_fpcr(env, fpcr);
5461 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5463 unsigned long framesize)
5465 abi_ulong sp = env->ir[IR_SP];
5467 /* This is the X/Open sanctioned signal stack switching. */
5468 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5469 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5471 return (sp - framesize) & -32;
5474 static void setup_frame(int sig, struct target_sigaction *ka,
5475 target_sigset_t *set, CPUAlphaState *env)
5477 abi_ulong frame_addr, r26;
5478 struct target_sigframe *frame;
5481 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5482 trace_user_setup_frame(env, frame_addr);
5483 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5487 setup_sigcontext(&frame->sc, env, frame_addr, set);
5489 if (ka->sa_restorer) {
5490 r26 = ka->sa_restorer;
5492 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5493 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5494 &frame->retcode[1]);
5495 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5500 unlock_user_struct(frame, frame_addr, 1);
5504 if (sig == TARGET_SIGSEGV) {
5505 ka->_sa_handler = TARGET_SIG_DFL;
5507 force_sig(TARGET_SIGSEGV);
5510 env->ir[IR_RA] = r26;
5511 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5512 env->ir[IR_A0] = sig;
5514 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5515 env->ir[IR_SP] = frame_addr;
5518 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5519 target_siginfo_t *info,
5520 target_sigset_t *set, CPUAlphaState *env)
5522 abi_ulong frame_addr, r26;
5523 struct target_rt_sigframe *frame;
5526 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5527 trace_user_setup_rt_frame(env, frame_addr);
5528 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5532 tswap_siginfo(&frame->info, info);
5534 __put_user(0, &frame->uc.tuc_flags);
5535 __put_user(0, &frame->uc.tuc_link);
5536 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5537 __put_user(target_sigaltstack_used.ss_sp,
5538 &frame->uc.tuc_stack.ss_sp);
5539 __put_user(sas_ss_flags(env->ir[IR_SP]),
5540 &frame->uc.tuc_stack.ss_flags);
5541 __put_user(target_sigaltstack_used.ss_size,
5542 &frame->uc.tuc_stack.ss_size);
5543 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5544 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5545 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5548 if (ka->sa_restorer) {
5549 r26 = ka->sa_restorer;
5551 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5552 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5553 &frame->retcode[1]);
5554 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5561 if (sig == TARGET_SIGSEGV) {
5562 ka->_sa_handler = TARGET_SIG_DFL;
5564 force_sig(TARGET_SIGSEGV);
5567 env->ir[IR_RA] = r26;
5568 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5569 env->ir[IR_A0] = sig;
5570 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5571 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5572 env->ir[IR_SP] = frame_addr;
5575 long do_sigreturn(CPUAlphaState *env)
5577 struct target_sigcontext *sc;
5578 abi_ulong sc_addr = env->ir[IR_A0];
5579 target_sigset_t target_set;
5582 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5586 target_sigemptyset(&target_set);
5587 __get_user(target_set.sig[0], &sc->sc_mask);
5589 target_to_host_sigset_internal(&set, &target_set);
5592 restore_sigcontext(env, sc);
5593 unlock_user_struct(sc, sc_addr, 0);
5594 return -TARGET_QEMU_ESIGRETURN;
5597 force_sig(TARGET_SIGSEGV);
5600 long do_rt_sigreturn(CPUAlphaState *env)
5602 abi_ulong frame_addr = env->ir[IR_A0];
5603 struct target_rt_sigframe *frame;
5606 trace_user_do_rt_sigreturn(env, frame_addr);
5607 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5610 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5613 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5614 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5616 0, env->ir[IR_SP]) == -EFAULT) {
5620 unlock_user_struct(frame, frame_addr, 0);
5621 return -TARGET_QEMU_ESIGRETURN;
5625 unlock_user_struct(frame, frame_addr, 0);
5626 force_sig(TARGET_SIGSEGV);
5629 #elif defined(TARGET_TILEGX)
5631 struct target_sigcontext {
5633 /* General-purpose registers. */
5634 abi_ulong gregs[56];
5636 abi_ulong __gregs[53];
5637 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
5638 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
5639 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
5642 abi_ulong pc; /* Program counter. */
5643 abi_ulong ics; /* In Interrupt Critical Section? */
5644 abi_ulong faultnum; /* Fault number. */
5648 struct target_ucontext {
5649 abi_ulong tuc_flags;
5651 target_stack_t tuc_stack;
5652 struct target_sigcontext tuc_mcontext;
5653 target_sigset_t tuc_sigmask; /* mask last for extensibility */
5656 struct target_rt_sigframe {
5657 unsigned char save_area[16]; /* caller save area */
5658 struct target_siginfo info;
5659 struct target_ucontext uc;
5660 abi_ulong retcode[2];
5663 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
5664 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
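/* These two bundles form the default restorer used when no SA_RESTORER is
   supplied: "moveli r10, 139" loads what is presumably the rt_sigreturn
   syscall number into r10 (the tilegx syscall-number register) and "swint1"
   enters the kernel; setup_rt_frame() copies them into frame->retcode. */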
5667 static void setup_sigcontext(struct target_sigcontext *sc,
5668 CPUArchState *env, int signo)
5672 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5673 __put_user(env->regs[i], &sc->gregs[i]);
5676 __put_user(env->pc, &sc->pc);
5677 __put_user(0, &sc->ics);
5678 __put_user(signo, &sc->faultnum);
5681 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
5685 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5686 __get_user(env->regs[i], &sc->gregs[i]);
5689 __get_user(env->pc, &sc->pc);
5692 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
5695 unsigned long sp = env->regs[TILEGX_R_SP];
5697 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
5701 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
5702 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5710 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5711 target_siginfo_t *info,
5712 target_sigset_t *set, CPUArchState *env)
5714 abi_ulong frame_addr;
5715 struct target_rt_sigframe *frame;
5716 unsigned long restorer;
5718 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5719 trace_user_setup_rt_frame(env, frame_addr);
5720 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5724 /* Always write at least the signal number for the stack backtracer. */
5725 if (ka->sa_flags & TARGET_SA_SIGINFO) {
5726 /* At sigreturn time, restore the callee-save registers too. */
5727 tswap_siginfo(&frame->info, info);
5728 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
5730 __put_user(info->si_signo, &frame->info.si_signo);
5733 /* Create the ucontext. */
5734 __put_user(0, &frame->uc.tuc_flags);
5735 __put_user(0, &frame->uc.tuc_link);
5736 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5737 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
5738 &frame->uc.tuc_stack.ss_flags);
5739 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5740 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
5742 if (ka->sa_flags & TARGET_SA_RESTORER) {
5743 restorer = (unsigned long) ka->sa_restorer;
5745 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
5746 __put_user(INSN_SWINT1, &frame->retcode[1]);
5747 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5749 env->pc = (unsigned long) ka->_sa_handler;
5750 env->regs[TILEGX_R_SP] = frame_addr;
5751 env->regs[TILEGX_R_LR] = restorer;
5752 env->regs[0] = (unsigned long) sig;
5753 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5754 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5755 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
5757 unlock_user_struct(frame, frame_addr, 1);
5761 if (sig == TARGET_SIGSEGV) {
5762 ka->_sa_handler = TARGET_SIG_DFL;
5764 force_sig(TARGET_SIGSEGV /* , current */);
5767 long do_rt_sigreturn(CPUTLGState *env)
5769 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
5770 struct target_rt_sigframe *frame;
5773 trace_user_do_rt_sigreturn(env, frame_addr);
5774 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5777 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5780 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5781 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5783 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
5787 unlock_user_struct(frame, frame_addr, 0);
5788 return -TARGET_QEMU_ESIGRETURN;
5792 unlock_user_struct(frame, frame_addr, 0);
5793 force_sig(TARGET_SIGSEGV);
5798 static void setup_frame(int sig, struct target_sigaction *ka,
5799 target_sigset_t *set, CPUArchState *env)
5801 fprintf(stderr, "setup_frame: not implemented\n");
5804 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5805 target_siginfo_t *info,
5806 target_sigset_t *set, CPUArchState *env)
5808 fprintf(stderr, "setup_rt_frame: not implemented\n");
5811 long do_sigreturn(CPUArchState *env)
5813 fprintf(stderr, "do_sigreturn: not implemented\n");
5814 return -TARGET_ENOSYS;
5817 long do_rt_sigreturn(CPUArchState *env)
5819 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
5820 return -TARGET_ENOSYS;
5825 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
5826 struct emulated_sigtable *k)
5828 CPUState *cpu = ENV_GET_CPU(cpu_env);
5831 target_sigset_t target_old_set;
5832 struct target_sigaction *sa;
5833 TaskState *ts = cpu->opaque;
5835 trace_user_handle_signal(cpu_env, sig);
5836 /* dequeue signal */
5839 sig = gdb_handlesig(cpu, sig);
5842 handler = TARGET_SIG_IGN;
5844 sa = &sigact_table[sig - 1];
5845 handler = sa->_sa_handler;
5849 print_taken_signal(sig, &k->info);
5852 if (handler == TARGET_SIG_DFL) {
5853 /* Default handler: some signals are ignored; the others are job control or fatal. */
5854 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5855 kill(getpid(),SIGSTOP);
5856 } else if (sig != TARGET_SIGCHLD &&
5857 sig != TARGET_SIGURG &&
5858 sig != TARGET_SIGWINCH &&
5859 sig != TARGET_SIGCONT) {
5862 } else if (handler == TARGET_SIG_IGN) {
5864 } else if (handler == TARGET_SIG_ERR) {
5867 /* compute the blocked signals during the handler execution */
5868 sigset_t *blocked_set;
5870 target_to_host_sigset(&set, &sa->sa_mask);
5871 /* SA_NODEFER indicates that the current signal should not be
5872 blocked during the handler */
5873 if (!(sa->sa_flags & TARGET_SA_NODEFER))
5874 sigaddset(&set, target_to_host_signal(sig));
5876 /* save the previous blocked signal state to restore it at the
5877 end of the signal execution (see do_sigreturn) */
5878 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
5880 /* block signals in the handler */
5881 blocked_set = ts->in_sigsuspend ?
5882 &ts->sigsuspend_mask : &ts->signal_mask;
5883 sigorset(&ts->signal_mask, blocked_set, &set);
5884 ts->in_sigsuspend = 0;
5886 /* if the CPU is in VM86 mode, we restore the 32 bit values */
5887 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
5889 CPUX86State *env = cpu_env;
5890 if (env->eflags & VM_MASK)
5891 save_v86_state(env);
5894 /* prepare the stack frame of the virtual CPU */
5895 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
5896 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
5897 /* These targets do not have traditional signals. */
5898 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5900 if (sa->sa_flags & TARGET_SA_SIGINFO)
5901 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5903 setup_frame(sig, sa, &target_old_set, cpu_env);
5905 if (sa->sa_flags & TARGET_SA_RESETHAND) {
5906 sa->_sa_handler = TARGET_SIG_DFL;
5911 void process_pending_signals(CPUArchState *cpu_env)
5913 CPUState *cpu = ENV_GET_CPU(cpu_env);
5915 TaskState *ts = cpu->opaque;
5917 sigset_t *blocked_set;
5919 while (atomic_read(&ts->signal_pending)) {
5920 /* FIXME: This is not threadsafe. */
5922 sigprocmask(SIG_SETMASK, &set, 0);
5925 sig = ts->sync_signal.pending;
5927 /* Synchronous signals are forced,
5928 * see force_sig_info() and callers in Linux
5929 * Note that not all of our queue_signal() calls in QEMU correspond
5930 * to force_sig_info() calls in Linux (some are send_sig_info()).
5931 * However it seems like a kernel bug to me to allow the process
5932 * to block a synchronous signal since it could then just end up
5933 * looping round and round indefinitely.
5935 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
5936 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
5937 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
5938 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
5941 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
5944 for (sig = 1; sig <= TARGET_NSIG; sig++) {
5945 blocked_set = ts->in_sigsuspend ?
5946 &ts->sigsuspend_mask : &ts->signal_mask;
5948 if (ts->sigtab[sig - 1].pending &&
5949 (!sigismember(blocked_set,
5950 target_to_host_signal_table[sig]))) {
5951 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
5952 /* Restart scan from the beginning, as handle_pending_signal
5953 * might have resulted in a new synchronous signal (eg SIGSEGV).
5959 /* if no signal is pending, unblock signals and recheck (the act
5960 * of unblocking might cause us to take another host signal which
5961 * will set signal_pending again).
5963 atomic_set(&ts->signal_pending, 0);
5964 ts->in_sigsuspend = 0;
5965 set = ts->signal_mask;
5966 sigdelset(&set, SIGSEGV);
5967 sigdelset(&set, SIGBUS);
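/* SIGSEGV and SIGBUS are deliberately left unblocked here: the emulator
 * relies on receiving them synchronously from the host to detect guest
 * memory faults, so blocking them would risk losing a fault notification.
 */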
5968 sigprocmask(SIG_SETMASK, &set, 0);
5970 ts->in_sigsuspend = 0;