/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"
#include "signal-common.h"

struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};

static uint8_t target_to_host_signal_table[_NSIG];

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG) {
        return sig;
    }
    return target_to_host_signal_table[sig];
}

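/*
 * Illustrative sketch (not part of the emulation): the two tables are
 * built as inverses of each other in signal_init(), so for ordinary
 * signals a round trip is the identity, e.g.
 *
 *     int t = host_to_target_signal(SIGUSR1);  // -> TARGET_SIGUSR1
 *     int h = target_to_host_signal(t);        // -> back to SIGUSR1
 *
 * The one deliberate exception is the __SIGRTMIN/__SIGRTMAX swap above.
 */
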
static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

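/*
 * Layout note (informational): target signal numbers start at 1, so the
 * decrement above places signal n at bit (n - 1) of the set. For
 * example, target_sigaddset(&set, 1) for TARGET_SIGHUP sets bit 0 of
 * set->sig[0].
 */
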
void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int i;

    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int i;

    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;

    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

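/*
 * Typical usage sketch (as in do_sigprocmask() below): callers that are
 * about to mutate emulated signal state first call block_signals() and
 * restart the syscall if a signal was already pending:
 *
 *     if (block_signals()) {
 *         return -TARGET_ERESTARTSYS;
 *     }
 */
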
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_OPENRISC) && !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;

    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

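/*
 * Packing sketch (informational): deposit32(si_code, 16, 16, si_type)
 * stores si_type in bits 16..31 while keeping the kernel's si_code in
 * bits 0..15. For example, si_code == SI_USER (0) combined with
 * si_type == QEMU_SI_KILL yields (QEMU_SI_KILL << 16) | 0;
 * tswap_siginfo() below recovers the halves with extract32() and
 * sextract32().
 */
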
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we only support the case where POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0) {
            host_to_target_signal_table[i] = i;
        }
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i)) {
            sigaction(host_sig, &act, NULL);
        }
    }
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

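/*
 * Usage sketch (mirrors force_sig() above): callers fill in a
 * target_siginfo_t and tag it with the QEMU_SI_* type matching the
 * union member they populated, e.g. for a faulting guest access:
 *
 *     info.si_signo = TARGET_SIGSEGV;
 *     info.si_errno = 0;
 *     info.si_code = TARGET_SEGV_MAPERR;
 *     info._sifields._sigfault._addr = addr;
 *     queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
 */
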
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* The CPU emulator uses some host signals to detect exceptions;
       we forward those to it. */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc)) {
            return;
        }
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

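/*
 * Background note on the memset() above (informational): SIGSET_T_SIZE
 * is the size of the *kernel* sigset_t, _NSIG / 8 bytes, i.e. 8 bytes
 * for the 64 signals Linux supports, whereas the glibc sigset_t that
 * sigfillset() writes is far larger (128 bytes). Hence the raw memset()
 * of exactly SIGSET_T_SIZE bytes followed by sigdelset() calls.
 */
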
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp)) {
            goto out;
        }

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0) {
            goto out;
        }

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss))) {
            goto out;
        }
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART) {
                act1.sa_flags |= SA_RESTART;
            }
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

#if defined(TARGET_PPC)

/* Size of dummy stack frame allocated when calling signal handler.
   See arch/powerpc/include/asm/ptrace.h.  */
#if defined(TARGET_PPC64)
#define SIGNAL_FRAMESIZE 128
#else
#define SIGNAL_FRAMESIZE 64
#endif

/* See arch/powerpc/include/asm/ucontext.h.  Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same.  */
struct target_mcontext {
    target_ulong mc_gregs[48];
    /* Includes fpscr.  */
    uint64_t mc_fregs[33];
#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
#else
    target_ulong mc_pad[2];
#endif
    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do.  Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform.  */
    union {
        /* SPE vector registers.  One extra for SPEFSCR.  */
        uint32_t spe[33];
        /* Altivec vector registers.  The packing of VSCR and VRSAVE
           varies depending on whether we're PPC64 or not: PPC64 splits
           them apart; PPC32 stuffs them together.
           We also need to account for the VSX registers on PPC64.  */
#if defined(TARGET_PPC64)
#define QEMU_NVRREG (34 + 16)
        /* On ppc64, this mcontext structure is naturally *unaligned*,
         * or rather it is aligned on a 8 bytes boundary but not on
         * a 16 bytes one. This pad fixes it up. This is also why the
         * vector regs are referenced by the v_regs pointer above so
         * any amount of padding can be added here.
         */
        target_ulong pad;
#else
        /* On ppc32, we are already aligned to 16 bytes */
#define QEMU_NVRREG 33
#endif
        /* We cannot use ppc_avr_t here as we do *not* want the implied
         * 16-bytes alignment that would result from it. This would have
         * the effect of making the whole struct target_mcontext aligned
         * which breaks the layout of struct target_ucontext on ppc64.
         */
        uint64_t altivec[QEMU_NVRREG][2];
#undef QEMU_NVRREG
    } mc_vregs;
};

/* See arch/powerpc/include/asm/sigcontext.h.  */
struct target_sigcontext {
    target_ulong _unused[4];
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};

/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details.  */
enum {
    /* The first 32 slots hold the GPRs r0..r31.  */
    TARGET_PT_R0 = 0,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only.  */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};

struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

/* See arch/powerpc/kernel/signal_32.c.  */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};

#if defined(TARGET_PPC64)

#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo; /* struct siginfo __user * */
    target_ulong puc; /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif

#if defined(TARGET_PPC64)

struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif

/* We use the mc_pad field for the signal return trampoline.  */
#define tramp mc_pad

/* See arch/powerpc/kernel/signal.c.  */
static target_ulong get_sigframe(struct target_sigaction *ka,
                                 CPUPPCState *env,
                                 int frame_size)
{
    target_ulong oldsp;

    oldsp = env->gpr[1];

    if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
            (sas_ss_flags(oldsp) == 0)) {
        oldsp = (target_sigaltstack_used.ss_sp
                 + target_sigaltstack_used.ss_size);
    }

    return (oldsp - frame_size) & ~0xFUL;
}

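/*
 * Worked example (informational): the final mask rounds the frame base
 * down to a 16-byte boundary, as the PPC ABIs require. With oldsp ==
 * 0xbffff8e4 and frame_size == 0x2e0:
 *
 *     0xbffff8e4 - 0x2e0 = 0xbffff604
 *     0xbffff604 & ~0xF  = 0xbffff600
 */
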
#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
     (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
#define PPC_VEC_HI      0
#define PPC_VEC_LO      1
#else
#define PPC_VEC_HI      1
#define PPC_VEC_LO      0
#endif

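/*
 * Informational: ppc_avr_t stores a 128-bit vector as two host-order
 * 64-bit halves. When host and target endianness agree, the
 * architecturally-high half is u64[0] (PPC_VEC_HI == 0); when they
 * differ, the halves are swapped (PPC_VEC_HI == 1). The save/restore
 * code below uses these indices so the guest frame always sees the
 * kernel's layout.
 */
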
static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    target_ulong ccr = 0;
    int i;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers.  We don't care that
       much, so we just go ahead and save everything.  */

    /* Save general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* Set MSR_VR in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
        msr |= (1 << MSR_VR);
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
            __put_user(env->vsr[i], &vsregs[i]);
        }
    }

    /* Save floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __put_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        /* Set MSR_SPE in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
        msr |= (1 << MSR_SPE);
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }

    /* Store MSR.  */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}

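/*
 * CCR packing sketch (informational): the loop in save_user_regs()
 * above packs the 8 four-bit CR fields into one 32-bit word with cr0 in
 * the most significant nibble, matching the hardware CR layout, e.g.
 * for i == 0:
 *
 *     ccr |= env->crf[0] << 28;    // 32 - ((0 + 1) * 4) == 28
 */
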
static void encode_trampoline(int sigret, uint32_t *tramp)
{
    /* Set up the sigreturn trampoline: li r0,sigret; sc.  */
    if (tramp) {
        __put_user(0x38000000 | sigret, &tramp[0]);
        __put_user(0x44000002, &tramp[1]);
    }
}

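/*
 * Encoding sketch (informational): 0x38000000 is the PPC "addi" major
 * opcode with rD == r0 and rA == 0, so 0x38000000 | sigret assembles to
 * "li r0, sigret"; 0x44000002 is "sc". For the 32-bit path this
 * materializes "li r0, TARGET_NR_sigreturn; sc" on the guest stack,
 * a hand-rolled syscall stub used in place of a vDSO.
 */
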
static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;
    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR.  */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig) {
        env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
    }

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* VRSAVE is stored after the vector registers; its slot differs
           between ppc64 and ppc32 (see the packing notes above).  */
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
            __get_user(env->vsr[i], &vsregs[i]);
        }
    }

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __get_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

    /* Restore SPE registers.  The kernel only saved the high half.  */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            uint32_t hi;

            __get_user(hi, &frame->mc_vregs.spe[i]);
            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
}

#if !defined(TARGET_PPC64)
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
        goto sigsegv;
    }
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack. */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err) {
        goto sigsegv;
    }

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(frame, frame_addr, 1);
    return;
sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
#endif /* !defined(TARGET_PPC64) */

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) {
        goto sigsegv;
    }

    tswap_siginfo(&rt_sf->info, info);

    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    __put_user((target_ulong)target_sigaltstack_used.ss_sp,
               &rt_sf->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->gpr[1]),
               &rt_sf->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &rt_sf->uc.tuc_stack.ss_size);
#if !defined(TARGET_PPC64)
    __put_user(h2g(&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err) {
        goto sigsegv;
    }

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points, but R12
         * must also be set */
        env->nip = tswapl((target_ulong) ka->_sa_handler);
        env->gpr[12] = env->nip;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);
}

#if !defined(TARGET_PPC64)
long do_sigreturn(CPUPPCState *env)
{
    struct target_sigcontext *sc = NULL;
    struct target_mcontext *sr = NULL;
    target_ulong sr_addr = 0, sc_addr;
    sigset_t blocked;
    target_sigset_t set;

    sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
        goto sigsegv;
    }

#if defined(TARGET_PPC64)
    set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
#else
    __get_user(set.sig[0], &sc->oldmask);
    __get_user(set.sig[1], &sc->_unused[3]);
#endif
    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);

    __get_user(sr_addr, &sc->regs);
    if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) {
        goto sigsegv;
    }
    restore_user_regs(env, sr, 1);

    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) */

/* See arch/powerpc/kernel/signal_32.c.  */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof(set))) {
        return 1;
    }

#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1)) {
        return 1;
    }

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}

long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1)) {
        goto sigsegv;
    }

    if (do_setcontext(&rt_sf->uc, env, 1)) {
        goto sigsegv;
    }

    do_sigaltstack(rt_sf_addr
                   + offsetof(struct target_rt_sigframe, uc.tuc_stack),
                   0, env->gpr[1]);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
#endif /* defined(TARGET_PPC) */

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals. The others are job control or fatal. */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
    || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
    || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
    || defined(TARGET_NIOS2) || defined(TARGET_X86_64) \
    || defined(TARGET_RISCV) || defined(TARGET_XTENSA)
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}