* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>
target_to_host_sigset(sigset, &d);
}
+int block_signals(void)
+{
+ TaskState *ts = (TaskState *)thread_cpu->opaque;
+ sigset_t set;
+
+ /* It's OK to block everything including SIGSEGV, because we won't
+ * run any further guest code before unblocking signals in
+ * process_pending_signals().
+ */
+ sigfillset(&set);
+ sigprocmask(SIG_SETMASK, &set, 0);
+
+ return atomic_xchg(&ts->signal_pending, 1);
+}
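+
+/* A typical caller pattern (illustrative sketch, not a verbatim quote
+ * of the syscall code): paths that must not race with signal delivery
+ * do
+ *     if (block_signals()) {
+ *         return -TARGET_ERESTARTSYS;
+ *     }
+ * atomic_xchg() returns the old value of signal_pending, so a non-zero
+ * result means a signal arrived first and the guest syscall should be
+ * restarted once it has been delivered.
+ */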
+
/* Wrapper for sigprocmask function
* Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
- * are host signal set, not guest ones. This wraps the sigprocmask host calls
- * that should be protected (calls originated from guest)
+ * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
+ * a signal was already pending and the syscall must be restarted, or
+ * 0 on success.
+ * If set is NULL, this is guaranteed not to fail.
*/
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
- int ret;
- sigset_t val;
- sigset_t *temp = NULL;
- CPUState *cpu = thread_cpu;
- TaskState *ts = (TaskState *)cpu->opaque;
- bool segv_was_blocked = ts->sigsegv_blocked;
+ TaskState *ts = (TaskState *)thread_cpu->opaque;
+
+ if (oldset) {
+ *oldset = ts->signal_mask;
+ }
if (set) {
- bool has_sigsegv = sigismember(set, SIGSEGV);
- val = *set;
- temp = &val;
+ int i;
- sigdelset(temp, SIGSEGV);
+ if (block_signals()) {
+ return -TARGET_ERESTARTSYS;
+ }
switch (how) {
case SIG_BLOCK:
- if (has_sigsegv) {
- ts->sigsegv_blocked = true;
- }
+ sigorset(&ts->signal_mask, &ts->signal_mask, set);
break;
case SIG_UNBLOCK:
- if (has_sigsegv) {
- ts->sigsegv_blocked = false;
+ for (i = 1; i <= NSIG; ++i) {
+ if (sigismember(set, i)) {
+ sigdelset(&ts->signal_mask, i);
+ }
}
break;
case SIG_SETMASK:
- ts->sigsegv_blocked = has_sigsegv;
+ ts->signal_mask = *set;
break;
default:
g_assert_not_reached();
}
- }
-
- ret = sigprocmask(how, temp, oldset);
- if (oldset && segv_was_blocked) {
- sigaddset(oldset, SIGSEGV);
+ /* Silently ignore attempts to change blocking status of KILL or STOP */
+ sigdelset(&ts->signal_mask, SIGKILL);
+ sigdelset(&ts->signal_mask, SIGSTOP);
}
+ return 0;
+}
- return ret;
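+/* Caller sketch (hypothetical, for illustration only): the sigprocmask
+ * syscall emulation can simply propagate the new return value:
+ *     ret = do_sigprocmask(how, have_set ? &set : NULL, &oldset);
+ *     if (ret < 0) {
+ *         return ret;  /* -TARGET_ERESTARTSYS: deliver signal, restart */
+ *     }
+ */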
+#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
+ !defined(TARGET_X86_64)
+/* Just set the guest's signal mask to the specified value; the
+ * caller is assumed to have called block_signals() already.
+ */
+static void set_sigmask(const sigset_t *set)
+{
+ TaskState *ts = (TaskState *)thread_cpu->opaque;
+
+ ts->signal_mask = *set;
}
+#endif
/* siginfo conversion */
const siginfo_t *info)
{
int sig = host_to_target_signal(info->si_signo);
+ int si_code = info->si_code;
+ int si_type;
tinfo->si_signo = sig;
tinfo->si_errno = 0;
tinfo->si_code = info->si_code;
- if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
- || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
- /* Should never come here, but who knows. The information for
- the target is irrelevant. */
- tinfo->_sifields._sigfault._addr = 0;
- } else if (sig == TARGET_SIGIO) {
- tinfo->_sifields._sigpoll._band = info->si_band;
- tinfo->_sifields._sigpoll._fd = info->si_fd;
- } else if (sig == TARGET_SIGCHLD) {
- tinfo->_sifields._sigchld._pid = info->si_pid;
- tinfo->_sifields._sigchld._uid = info->si_uid;
- tinfo->_sifields._sigchld._status
+ /* This memset serves two purposes:
+ * (1) ensure we don't leak random junk to the guest later
+ * (2) placate false positives from gcc about fields
+ * being used uninitialized if it chooses to inline both this
+ * function and tswap_siginfo() into host_to_target_siginfo().
+ */
+ memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
+
+ /* This is awkward, because we have to use a combination of
+ * the si_code and si_signo to figure out which of the union's
+ * members are valid. (Within the host kernel it is always possible
+ * to tell, but the kernel carefully avoids giving userspace the
+ * high 16 bits of si_code, so we don't have the information to
+ * do this the easy way...) We therefore make our best guess,
+ * bearing in mind that a guest can spoof most of the si_codes
+ * via rt_sigqueueinfo() if it likes.
+ *
+ * Once we have made our guess, we record it in the top 16 bits of
+ * the si_code, so that tswap_siginfo() later can use it.
+ * tswap_siginfo() will strip these top bits out before writing
+ * si_code to the guest (sign-extending the lower bits).
+ */
+
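+ /* The QEMU_SI_* values used below are QEMU-internal markers, one per
+ * valid _sifields member (_kill, _timer, _sigpoll, _sigfault,
+ * _sigchld, _rt); they are assumed to be defined in a header
+ * elsewhere in this patch.
+ */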
+ switch (si_code) {
+ case SI_USER:
+ case SI_TKILL:
+ case SI_KERNEL:
+ /* Sent via kill(), tkill() or tgkill(), or directly from the kernel.
+ * These are the only unspoofable si_code values.
+ */
+ tinfo->_sifields._kill._pid = info->si_pid;
+ tinfo->_sifields._kill._uid = info->si_uid;
+ si_type = QEMU_SI_KILL;
+ break;
+ default:
+ /* Everything else is spoofable. Make best guess based on signal */
+ switch (sig) {
+ case TARGET_SIGCHLD:
+ tinfo->_sifields._sigchld._pid = info->si_pid;
+ tinfo->_sifields._sigchld._uid = info->si_uid;
+ tinfo->_sifields._sigchld._status
= host_to_target_waitstatus(info->si_status);
- tinfo->_sifields._sigchld._utime = info->si_utime;
- tinfo->_sifields._sigchld._stime = info->si_stime;
- } else if (sig >= TARGET_SIGRTMIN) {
- tinfo->_sifields._rt._pid = info->si_pid;
- tinfo->_sifields._rt._uid = info->si_uid;
- /* XXX: potential problem if 64 bit */
- tinfo->_sifields._rt._sigval.sival_ptr
+ tinfo->_sifields._sigchld._utime = info->si_utime;
+ tinfo->_sifields._sigchld._stime = info->si_stime;
+ si_type = QEMU_SI_CHLD;
+ break;
+ case TARGET_SIGIO:
+ tinfo->_sifields._sigpoll._band = info->si_band;
+ tinfo->_sifields._sigpoll._fd = info->si_fd;
+ si_type = QEMU_SI_POLL;
+ break;
+ default:
+ /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
+ tinfo->_sifields._rt._pid = info->si_pid;
+ tinfo->_sifields._rt._uid = info->si_uid;
+ /* XXX: potential problem if 64 bit */
+ tinfo->_sifields._rt._sigval.sival_ptr
= (abi_ulong)(unsigned long)info->si_value.sival_ptr;
+ si_type = QEMU_SI_RT;
+ break;
+ }
+ break;
}
+
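+ /* deposit32(val, start, len, field), from "qemu/bitops.h", returns
+ * val with bits [start, start+len) replaced by field; here it stores
+ * the si_type marker in the top 16 bits of si_code.
+ */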
+ tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
static void tswap_siginfo(target_siginfo_t *tinfo,
const target_siginfo_t *info)
{
- int sig = info->si_signo;
- tinfo->si_signo = tswap32(sig);
- tinfo->si_errno = tswap32(info->si_errno);
- tinfo->si_code = tswap32(info->si_code);
-
- if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
- || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
- tinfo->_sifields._sigfault._addr
- = tswapal(info->_sifields._sigfault._addr);
- } else if (sig == TARGET_SIGIO) {
- tinfo->_sifields._sigpoll._band
- = tswap32(info->_sifields._sigpoll._band);
- tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
- } else if (sig == TARGET_SIGCHLD) {
- tinfo->_sifields._sigchld._pid
- = tswap32(info->_sifields._sigchld._pid);
- tinfo->_sifields._sigchld._uid
- = tswap32(info->_sifields._sigchld._uid);
- tinfo->_sifields._sigchld._status
- = tswap32(info->_sifields._sigchld._status);
- tinfo->_sifields._sigchld._utime
- = tswapal(info->_sifields._sigchld._utime);
- tinfo->_sifields._sigchld._stime
- = tswapal(info->_sifields._sigchld._stime);
- } else if (sig >= TARGET_SIGRTMIN) {
- tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
- tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
- tinfo->_sifields._rt._sigval.sival_ptr
- = tswapal(info->_sifields._rt._sigval.sival_ptr);
+ int si_type = extract32(info->si_code, 16, 16);
+ int si_code = sextract32(info->si_code, 0, 16);
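+ /* extract32()/sextract32() undo the deposit32() done in
+ * host_to_target_siginfo_noswap(): the top 16 bits carry the
+ * QEMU_SI_* marker, and the low 16 bits are sign-extended back into
+ * the si_code value the guest should see.
+ */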
+
+ __put_user(info->si_signo, &tinfo->si_signo);
+ __put_user(info->si_errno, &tinfo->si_errno);
+ __put_user(si_code, &tinfo->si_code);
+
+ /* We can use our internal marker of which fields in the structure
+ * are valid, rather than duplicating the guesswork of
+ * host_to_target_siginfo_noswap() here.
+ */
+ switch (si_type) {
+ case QEMU_SI_KILL:
+ __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
+ __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
+ break;
+ case QEMU_SI_TIMER:
+ __put_user(info->_sifields._timer._timer1,
+ &tinfo->_sifields._timer._timer1);
+ __put_user(info->_sifields._timer._timer2,
+ &tinfo->_sifields._timer._timer2);
+ break;
+ case QEMU_SI_POLL:
+ __put_user(info->_sifields._sigpoll._band,
+ &tinfo->_sifields._sigpoll._band);
+ __put_user(info->_sifields._sigpoll._fd,
+ &tinfo->_sifields._sigpoll._fd);
+ break;
+ case QEMU_SI_FAULT:
+ __put_user(info->_sifields._sigfault._addr,
+ &tinfo->_sifields._sigfault._addr);
+ break;
+ case QEMU_SI_CHLD:
+ __put_user(info->_sifields._sigchld._pid,
+ &tinfo->_sifields._sigchld._pid);
+ __put_user(info->_sifields._sigchld._uid,
+ &tinfo->_sifields._sigchld._uid);
+ __put_user(info->_sifields._sigchld._status,
+ &tinfo->_sifields._sigchld._status);
+ __put_user(info->_sifields._sigchld._utime,
+ &tinfo->_sifields._sigchld._utime);
+ __put_user(info->_sifields._sigchld._stime,
+ &tinfo->_sifields._sigchld._stime);
+ break;
+ case QEMU_SI_RT:
+ __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
+ __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
+ __put_user(info->_sifields._rt._sigval.sival_ptr,
+ &tinfo->_sifields._rt._sigval.sival_ptr);
+ break;
+ default:
+ g_assert_not_reached();
}
}
-
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
- host_to_target_siginfo_noswap(tinfo, info);
- tswap_siginfo(tinfo, tinfo);
+ target_siginfo_t tgt_tmp;
+ host_to_target_siginfo_noswap(&tgt_tmp, info);
+ tswap_siginfo(tinfo, &tgt_tmp);
}
/* XXX: we support only the case where POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
- info->si_signo = tswap32(tinfo->si_signo);
- info->si_errno = tswap32(tinfo->si_errno);
- info->si_code = tswap32(tinfo->si_code);
- info->si_pid = tswap32(tinfo->_sifields._rt._pid);
- info->si_uid = tswap32(tinfo->_sifields._rt._uid);
- info->si_value.sival_ptr =
- (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
+ /* This conversion is used only for the rt_sigqueueinfo syscall,
+ * and so we know that the _rt fields are the valid ones.
+ */
+ abi_ulong sival_ptr;
+
+ __get_user(info->si_signo, &tinfo->si_signo);
+ __get_user(info->si_errno, &tinfo->si_errno);
+ __get_user(info->si_code, &tinfo->si_code);
+ __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
+ __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
+ __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
+ info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
static int fatal_signal (int sig)
void signal_init(void)
{
+ TaskState *ts = (TaskState *)thread_cpu->opaque;
struct sigaction act;
struct sigaction oact;
int i, j;
target_to_host_signal_table[j] = i;
}
+ /* Set the signal mask from the host mask. */
+ sigprocmask(0, 0, &ts->signal_mask);
+
/* set all host signal handlers. ALL signals are blocked during
the handlers to serialize them. */
memset(sigact_table, 0, sizeof(sigact_table));
}
}
-/* signal queue handling */
-
-static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
+#if !(defined(TARGET_X86_64) || defined(TARGET_UNICORE32))
+/* Force a synchronously taken signal. The kernel force_sig() function
+ * also forces the signal to "not blocked, not ignored", but for QEMU
+ * that work is done in process_pending_signals().
+ */
+static void force_sig(int sig)
{
- CPUState *cpu = ENV_GET_CPU(env);
- TaskState *ts = cpu->opaque;
- struct sigqueue *q = ts->first_free;
- if (!q)
- return NULL;
- ts->first_free = q->next;
- return q;
+ CPUState *cpu = thread_cpu;
+ CPUArchState *env = cpu->env_ptr;
+ target_siginfo_t info;
+
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = TARGET_SI_KERNEL;
+ info._sifields._kill._pid = 0;
+ info._sifields._kill._uid = 0;
+ queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}
-static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
+/* Force a SIGSEGV if we couldn't write to memory trying to set
+ * up the signal frame. oldsig is the signal we were trying to handle
+ * at the point of failure.
+ */
+static void force_sigsegv(int oldsig)
{
- CPUState *cpu = ENV_GET_CPU(env);
- TaskState *ts = cpu->opaque;
-
- q->next = ts->first_free;
- ts->first_free = q;
+ if (oldsig == SIGSEGV) {
+ /* Make sure we don't try to deliver the signal again; this will
+ * end up with handle_pending_signal() calling dump_core_and_abort().
+ */
+ sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
+ }
+ force_sig(TARGET_SIGSEGV);
}
+#endif
/* abort execution with signal */
-static void QEMU_NORETURN force_sig(int target_sig)
+static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
CPUState *cpu = thread_cpu;
CPUArchState *env = cpu->env_ptr;
/* queue a signal so that it will be sent to the virtual CPU as soon
as possible */
-int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
+int queue_signal(CPUArchState *env, int sig, int si_type,
+ target_siginfo_t *info)
{
CPUState *cpu = ENV_GET_CPU(env);
TaskState *ts = cpu->opaque;
- struct emulated_sigtable *k;
- struct sigqueue *q, **pq;
- abi_ulong handler;
- int queue;
trace_user_queue_signal(env, sig);
- k = &ts->sigtab[sig - 1];
- queue = gdb_queuesig ();
- handler = sigact_table[sig - 1]._sa_handler;
-
- if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
- /* Guest has blocked SIGSEGV but we got one anyway. Assume this
- * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
- * because it got a real MMU fault). A blocked SIGSEGV in that
- * situation is treated as if using the default handler. This is
- * not correct if some other process has randomly sent us a SIGSEGV
- * via kill(), but that is not easy to distinguish at this point,
- * so we assume it doesn't happen.
- */
- handler = TARGET_SIG_DFL;
- }
- if (!queue && handler == TARGET_SIG_DFL) {
- if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
- kill(getpid(),SIGSTOP);
- return 0;
- } else
- /* default handler : ignore some signal. The other are fatal */
- if (sig != TARGET_SIGCHLD &&
- sig != TARGET_SIGURG &&
- sig != TARGET_SIGWINCH &&
- sig != TARGET_SIGCONT) {
- force_sig(sig);
- } else {
- return 0; /* indicate ignored */
- }
- } else if (!queue && handler == TARGET_SIG_IGN) {
- /* ignore signal */
- return 0;
- } else if (!queue && handler == TARGET_SIG_ERR) {
- force_sig(sig);
- } else {
- pq = &k->first;
- if (sig < TARGET_SIGRTMIN) {
- /* if non real time signal, we queue exactly one signal */
- if (!k->pending)
- q = &k->info;
- else
- return 0;
- } else {
- if (!k->pending) {
- /* first signal */
- q = &k->info;
- } else {
- q = alloc_sigqueue(env);
- if (!q)
- return -EAGAIN;
- while (*pq != NULL)
- pq = &(*pq)->next;
- }
- }
- *pq = q;
- q->info = *info;
- q->next = NULL;
- k->pending = 1;
- /* signal that a new signal is pending */
- ts->signal_pending = 1;
- return 1; /* indicates that the signal was queued */
- }
+ info->si_code = deposit32(info->si_code, 16, 16, si_type);
+
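+ /* queue_signal() is now used only for synchronous signals raised by
+ * the emulated CPU itself; these live in the single-entry
+ * ts->sync_signal slot, which process_pending_signals() checks before
+ * the ordinary per-signal sigtab[] entries.
+ */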
+ ts->sync_signal.info = *info;
+ ts->sync_signal.pending = sig;
+ /* signal that a new signal is pending */
+ atomic_set(&ts->signal_pending, 1);
+ return 1; /* indicates that the signal was queued */
}
#ifndef HAVE_SAFE_SYSCALL
void *puc)
{
CPUArchState *env = thread_cpu->env_ptr;
+ CPUState *cpu = ENV_GET_CPU(env);
+ TaskState *ts = cpu->opaque;
+
int sig;
target_siginfo_t tinfo;
+ ucontext_t *uc = puc;
+ struct emulated_sigtable *k;
/* the CPU emulator uses some host signals to detect exceptions,
we forward to it some signals */
rewind_if_in_safe_syscall(puc);
host_to_target_siginfo_noswap(&tinfo, info);
- if (queue_signal(env, sig, &tinfo) == 1) {
- /* interrupt the virtual CPU as soon as possible */
- cpu_exit(thread_cpu);
- }
+ k = &ts->sigtab[sig - 1];
+ k->info = tinfo;
+ k->pending = sig;
+ ts->signal_pending = 1;
+
+ /* Block host signals until target signal handler entered. We
+ * can't block SIGSEGV or SIGBUS while we're executing guest
+ * code in case the guest code provokes one in the window between
+ * now and it getting out to the main loop. Signals will be
+ * unblocked again in process_pending_signals().
+ *
+ * WARNING: we cannot use sigfillset() here because the uc_sigmask
+ * field is a kernel sigset_t, which is much smaller than the
+ * libc sigset_t which sigfillset() operates on. Using sigfillset()
+ * would write 0xff bytes off the end of the structure and trash
+ * other data in the struct.
+ * We can't use sizeof(uc->uc_sigmask) either, because the libc
+ * headers define the struct field with the wrong (too large) type.
+ */
+ memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
+ sigdelset(&uc->uc_sigmask, SIGSEGV);
+ sigdelset(&uc->uc_sigmask, SIGBUS);
+
+ /* interrupt the virtual CPU as soon as possible */
+ cpu_exit(thread_cpu);
}
/* do_sigaltstack() returns target values and errnos. */
return ret;
}
-/* do_sigaction() return host values and errnos */
+/* do_sigaction() return target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
struct target_sigaction *oact)
{
int host_sig;
int ret = 0;
- if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
- return -EINVAL;
+ if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
+ return -TARGET_EINVAL;
+ }
+
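+ /* Block all signals so that no host signal handler can run while
+ * sigact_table is being updated; if a signal was already pending we
+ * back out and let the guest restart the syscall.
+ */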
+ if (block_signals()) {
+ return -TARGET_ERESTARTSYS;
+ }
+
k = &sigact_table[sig - 1];
if (oact) {
__put_user(k->_sa_handler, &oact->_sa_handler);
return;
give_sigsegv:
- if (sig == TARGET_SIGSEGV) {
- ka->_sa_handler = TARGET_SIG_DFL;
- }
- force_sig(TARGET_SIGSEGV /* , current */);
+ force_sigsegv(sig);
}
/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
return;
give_sigsegv:
- if (sig == TARGET_SIGSEGV) {
- ka->_sa_handler = TARGET_SIG_DFL;
- }
- force_sig(TARGET_SIGSEGV /* , current */);
+ force_sigsegv(sig);
}
static int
}
target_to_host_sigset_internal(&set, &target_set);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
/* restore registers */
if (restore_sigcontext(env, &frame->sc))
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUX86State *env)
if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
goto badframe;
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
goto badframe;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
#elif defined(TARGET_AARCH64)
uint64_t pstate;
target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
for (i = 0; i < 31; i++) {
__get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
give_sigsegv:
unlock_user_struct(frame, frame_addr, 1);
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(usig);
}
static void setup_rt_frame(int sig, struct target_sigaction *ka,
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_sigreturn(CPUARMState *env)
trace_user_setup_frame(regs, frame_addr);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
- return;
+ goto sigsegv;
}
setup_sigcontext(&frame->sc, regs, set->sig[0]);
frame_addr + offsetof(struct sigframe_v1, retcode));
unlock_user_struct(frame, frame_addr, 1);
+ return;
+sigsegv:
+ force_sigsegv(usig);
}
static void setup_frame_v2(int usig, struct target_sigaction *ka,
trace_user_setup_frame(regs, frame_addr);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
- return;
+ goto sigsegv;
}
setup_sigframe_v2(&frame->uc, set, regs);
frame_addr + offsetof(struct sigframe_v2, retcode));
unlock_user_struct(frame, frame_addr, 1);
+ return;
+sigsegv:
+ force_sigsegv(usig);
}
static void setup_frame(int usig, struct target_sigaction *ka,
trace_user_setup_rt_frame(env, frame_addr);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
- return /* 1 */;
+ goto sigsegv;
}
info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
env->regs[2] = uc_addr;
unlock_user_struct(frame, frame_addr, 1);
+ return;
+sigsegv:
+ force_sigsegv(usig);
}
static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
trace_user_setup_rt_frame(env, frame_addr);
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
- return /* 1 */;
+ goto sigsegv;
}
info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
env->regs[2] = uc_addr;
unlock_user_struct(frame, frame_addr, 1);
+ return;
+sigsegv:
+ force_sigsegv(usig);
}
static void setup_rt_frame(int usig, struct target_sigaction *ka,
}
target_to_host_sigset_internal(&host_set, &set);
- do_sigprocmask(SIG_SETMASK, &host_set, NULL);
+ set_sigmask(&host_set);
if (restore_sigcontext(env, &frame->sc)) {
goto badframe;
return -TARGET_QEMU_ESIGRETURN;
badframe:
- force_sig(TARGET_SIGSEGV /* , current */);
- return 0;
+ force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
return (abi_ulong*)(iwmmxtframe + 1);
}
-static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
+static int do_sigframe_return_v2(CPUARMState *env,
+ target_ulong context_addr,
struct target_ucontext_v2 *uc)
{
sigset_t host_set;
abi_ulong *regspace;
target_to_host_sigset(&host_set, &uc->tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &host_set, NULL);
+ set_sigmask(&host_set);
if (restore_sigcontext(env, &uc->tuc_mcontext))
return 1;
}
}
- if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
+ if (do_sigaltstack(context_addr
+ + offsetof(struct target_ucontext_v2, tuc_stack),
+ 0, get_sp_from_cpustate(env)) == -EFAULT) {
return 1;
+ }
#if 0
/* Send SIGTRAP if we're single-stepping */
goto badframe;
}
- if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
+ if (do_sigframe_return_v2(env,
+ frame_addr
+ + offsetof(struct sigframe_v2, uc),
+ &frame->uc)) {
goto badframe;
}
badframe:
unlock_user_struct(frame, frame_addr, 0);
- force_sig(TARGET_SIGSEGV /* , current */);
- return 0;
+ force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_sigreturn(CPUARMState *env)
}
target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &host_set, NULL);
+ set_sigmask(&host_set);
if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
goto badframe;
badframe:
unlock_user_struct(frame, frame_addr, 0);
- force_sig(TARGET_SIGSEGV /* , current */);
- return 0;
+ force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
static long do_rt_sigreturn_v2(CPUARMState *env)
goto badframe;
}
- if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
+ if (do_sigframe_return_v2(env,
+ frame_addr
+ + offsetof(struct rt_sigframe_v2, uc),
+ &frame->uc)) {
goto badframe;
}
badframe:
unlock_user_struct(frame, frame_addr, 0);
- force_sig(TARGET_SIGSEGV /* , current */);
- return 0;
+ force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUARMState *env)
#endif
sigsegv:
unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
static void setup_rt_frame(int sig, struct target_sigaction *ka,
}
target_to_host_sigset_internal(&host_set, &set);
- do_sigprocmask(SIG_SETMASK, &host_set, NULL);
+ set_sigmask(&host_set);
if (err) {
goto segv_and_exit;
segv_and_exit:
unlock_user_struct(sf, sf_addr, 0);
force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUSPARCState *env)
}
}
target_to_host_sigset_internal(&set, &target_set);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
}
env->pc = pc;
env->npc = npc;
env->pc = env->npc;
env->npc += 4;
- err = 0;
-
- do_sigprocmask(0, NULL, &set);
+ /* If we're only reading the signal mask then do_sigprocmask()
+ * is guaranteed not to fail, which is important because we don't
+ * have any way to signal a failure or restart this operation since
+ * this is not a normal syscall.
+ */
+ err = do_sigprocmask(0, NULL, &set);
+ assert(err == 0);
host_to_target_sigset_internal(&target_set, &set);
if (TARGET_NSIG_WORDS == 1) {
__put_user(target_set.sig[0],
return;
give_sigsegv:
- force_sig(TARGET_SIGSEGV/*, current*/);
+ force_sigsegv(sig);
}
long do_sigreturn(CPUMIPSState *regs)
}
target_to_host_sigset_internal(&blocked, &target_set);
- do_sigprocmask(SIG_SETMASK, &blocked, NULL);
+ set_sigmask(&blocked);
restore_sigcontext(regs, &frame->sf_sc);
return -TARGET_QEMU_ESIGRETURN;
badframe:
- force_sig(TARGET_SIGSEGV/*, current*/);
- return 0;
+ force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
# endif /* O32 */
give_sigsegv:
unlock_user_struct(frame, frame_addr, 1);
- force_sig(TARGET_SIGSEGV/*, current*/);
+ force_sigsegv(sig);
}
long do_rt_sigreturn(CPUMIPSState *env)
}
target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &blocked, NULL);
+ set_sigmask(&blocked);
restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
return -TARGET_QEMU_ESIGRETURN;
badframe:
- force_sig(TARGET_SIGSEGV/*, current*/);
- return 0;
+ force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
#elif defined(TARGET_SH4)
give_sigsegv:
unlock_user_struct(frame, frame_addr, 1);
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
static void setup_rt_frame(int sig, struct target_sigaction *ka,
give_sigsegv:
unlock_user_struct(frame, frame_addr, 1);
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
long do_sigreturn(CPUSH4State *regs)
goto badframe;
target_to_host_sigset_internal(&blocked, &target_set);
- do_sigprocmask(SIG_SETMASK, &blocked, NULL);
+ set_sigmask(&blocked);
restore_sigcontext(regs, &frame->sc);
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUSH4State *regs)
}
target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &blocked, NULL);
+ set_sigmask(&blocked);
restore_sigcontext(regs, &frame->uc.tuc_mcontext);
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
#elif defined(TARGET_MICROBLAZE)
unlock_user_struct(frame, frame_addr, 1);
return;
badframe:
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
static void setup_rt_frame(int sig, struct target_sigaction *ka,
__get_user(target_set.sig[i], &frame->extramask[i - 1]);
}
target_to_host_sigset_internal(&set, &target_set);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
restore_sigcontext(&frame->uc.tuc_mcontext, env);
/* We got here through a sigreturn syscall, our path back is via an
return -TARGET_QEMU_ESIGRETURN;
badframe:
force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUMBState *env)
unlock_user_struct(frame, frame_addr, 1);
return;
badframe:
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
static void setup_rt_frame(int sig, struct target_sigaction *ka,
__get_user(target_set.sig[i], &frame->extramask[i - 1]);
}
target_to_host_sigset_internal(&set, &target_set);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
restore_sigcontext(&frame->sc, env);
unlock_user_struct(frame, frame_addr, 0);
return -TARGET_QEMU_ESIGRETURN;
badframe:
force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUCRISState *env)
static inline unsigned long align_sigframe(unsigned long sp)
{
- unsigned long i;
- i = sp & ~3UL;
- return i;
+ return sp & ~3UL;
}
static inline abi_ulong get_sigframe(struct target_sigaction *ka,
give_sigsegv:
unlock_user_struct(frame, frame_addr, 1);
- if (sig == TARGET_SIGSEGV) {
- ka->_sa_handler = TARGET_SIG_DFL;
- }
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
long do_sigreturn(CPUOpenRISCState *env)
env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
/* Place signal number on stack to allow backtrace from handler. */
- __put_user(env->regs[2], (int *) &frame->signo);
+ __put_user(env->regs[2], &frame->signo);
unlock_user_struct(frame, frame_addr, 1);
return;
give_sigsegv:
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
static void setup_rt_frame(int sig, struct target_sigaction *ka,
return;
give_sigsegv:
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
static int
__get_user(target_set.sig[0], &frame->sc.oldmask[0]);
target_to_host_sigset_internal(&set, &target_set);
- do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
+ set_sigmask(&set); /* ~_BLOCKABLE? */
if (restore_sigregs(env, &frame->sregs)) {
goto badframe;
badframe:
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUS390XState *env)
}
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
+ set_sigmask(&set); /* ~_BLOCKABLE? */
if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
goto badframe;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
#elif defined(TARGET_PPC)
target_ulong mc_gregs[48];
/* Includes fpscr. */
uint64_t mc_fregs[33];
+#if defined(TARGET_PPC64)
+ /* Pointer to the vector regs */
+ target_ulong v_regs;
+#else
target_ulong mc_pad[2];
+#endif
/* We need to handle Altivec and SPE at the same time, which no
kernel needs to do. Fortunately, the kernel defines this bit to
be Altivec-register-large all the time, rather than trying to
uint32_t spe[33];
/* Altivec vector registers. The packing of VSCR and VRSAVE
varies depending on whether we're PPC64 or not: PPC64 splits
- them apart; PPC32 stuffs them together. */
+ them apart; PPC32 stuffs them together.
+ We also need to account for the VSX registers on PPC64.
+ */
#if defined(TARGET_PPC64)
-#define QEMU_NVRREG 34
+#define QEMU_NVRREG (34 + 16)
+ /* On ppc64, this mcontext structure is naturally *unaligned*,
+ * or rather it is aligned on an 8-byte boundary but not on
+ * a 16-byte one. This pad fixes it up. This is also why the
+ * vector regs are referenced by the v_regs pointer above, so
+ * that any amount of padding can be added here.
+ */
+ target_ulong pad;
#else
+ /* On ppc32, we are already aligned to 16 bytes */
#define QEMU_NVRREG 33
#endif
- ppc_avr_t altivec[QEMU_NVRREG];
+ /* We cannot use ppc_avr_t here as we do *not* want the implied
+ * 16-byte alignment that would result from it. This would have
+ * the effect of making the whole struct target_mcontext aligned
+ * which breaks the layout of struct target_ucontext on ppc64.
+ */
+ uint64_t altivec[QEMU_NVRREG][2];
#undef QEMU_NVRREG
- } mc_vregs __attribute__((__aligned__(16)));
+ } mc_vregs;
};
/* See arch/powerpc/include/asm/sigcontext.h. */
CPUPPCState *env,
int frame_size)
{
- target_ulong oldsp, newsp;
+ target_ulong oldsp;
oldsp = env->gpr[1];
+ target_sigaltstack_used.ss_size);
}
- newsp = (oldsp - frame_size) & ~0xFUL;
-
- return newsp;
+ return (oldsp - frame_size) & ~0xFUL;
}
+#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
+ (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
+#define PPC_VEC_HI 0
+#define PPC_VEC_LO 1
+#else
+#define PPC_VEC_HI 1
+#define PPC_VEC_LO 0
+#endif
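+/* PPC_VEC_HI/PPC_VEC_LO select which u64[] element of a ppc_avr_t
+ * holds the architecturally most/least significant doubleword for
+ * this host/target endianness combination, so that the doublewords
+ * are written to the signal frame in the order the guest expects.
+ */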
+
+
static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
target_ulong msr = env->msr;
/* Save Altivec registers if necessary. */
if (env->insns_flags & PPC_ALTIVEC) {
+ uint32_t *vrsave;
for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
ppc_avr_t *avr = &env->avr[i];
- ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
+ ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
- __put_user(avr->u64[0], &vreg->u64[0]);
- __put_user(avr->u64[1], &vreg->u64[1]);
+ __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
+ __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
}
/* Set MSR_VR in the saved MSR value to indicate that
frame->mc_vregs contains valid data. */
msr |= MSR_VR;
- __put_user((uint32_t)env->spr[SPR_VRSAVE],
- &frame->mc_vregs.altivec[32].u32[3]);
+#if defined(TARGET_PPC64)
+ vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
+ /* 64-bit needs to put a pointer to the vectors in the frame */
+ __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
+#else
+ vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
+#endif
+ __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
+ }
+
+ /* Save VSX second halves */
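+ /* (In this version of QEMU, env->vsr[] holds only the second
+ * doubleword of VSX registers 0-31; their first doublewords alias
+ * the FP registers, which are saved separately below.)
+ */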
+ if (env->insns_flags2 & PPC2_VSX) {
+ uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
+ for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
+ __put_user(env->vsr[i], &vsregs[i]);
+ }
}
/* Save floating point registers. */
/* Restore Altivec registers if necessary. */
if (env->insns_flags & PPC_ALTIVEC) {
+ ppc_avr_t *v_regs;
+ uint32_t *vrsave;
+#if defined(TARGET_PPC64)
+ uint64_t v_addr;
+ /* 64-bit needs to recover the pointer to the vectors from the frame */
+ __get_user(v_addr, &frame->v_regs);
+ v_regs = g2h(v_addr);
+#else
+ v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
+#endif
for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
ppc_avr_t *avr = &env->avr[i];
- ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
+ ppc_avr_t *vreg = &v_regs[i];
- __get_user(avr->u64[0], &vreg->u64[0]);
- __get_user(avr->u64[1], &vreg->u64[1]);
+ __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
+ __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
}
/* Set MSR_VEC in the saved MSR value to indicate that
frame->mc_vregs contains valid data. */
- __get_user(env->spr[SPR_VRSAVE],
- (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
+#if defined(TARGET_PPC64)
+ vrsave = (uint32_t *)&v_regs[33];
+#else
+ vrsave = (uint32_t *)&v_regs[32];
+#endif
+ __get_user(env->spr[SPR_VRSAVE], vrsave);
+ }
+
+ /* Restore VSX second halves */
+ if (env->insns_flags2 & PPC2_VSX) {
+ uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
+ for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
+ __get_user(env->vsr[i], &vsregs[i]);
+ }
}
/* Restore floating point registers. */
}
}
+#if !defined(TARGET_PPC64)
static void setup_frame(int sig, struct target_sigaction *ka,
target_sigset_t *set, CPUPPCState *env)
{
struct target_sigcontext *sc;
target_ulong frame_addr, newsp;
int err = 0;
-#if defined(TARGET_PPC64)
- struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
-#endif
frame_addr = get_sigframe(ka, env, sizeof(*frame));
trace_user_setup_frame(env, frame_addr);
__put_user(ka->_sa_handler, &sc->handler);
__put_user(set->sig[0], &sc->oldmask);
-#if TARGET_ABI_BITS == 64
- __put_user(set->sig[0] >> 32, &sc->_unused[3]);
-#else
__put_user(set->sig[1], &sc->_unused[3]);
-#endif
__put_user(h2g(&frame->mctx), &sc->regs);
__put_user(sig, &sc->signal);
env->gpr[3] = sig;
env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
-#if defined(TARGET_PPC64)
- if (get_ppc64_abi(image) < 2) {
- /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
- struct target_func_ptr *handler =
- (struct target_func_ptr *)g2h(ka->_sa_handler);
- env->nip = tswapl(handler->entry);
- env->gpr[2] = tswapl(handler->toc);
- } else {
- /* ELFv2 PPC64 function pointers are entry points, but R12
- * must also be set */
- env->nip = tswapl((target_ulong) ka->_sa_handler);
- env->gpr[12] = env->nip;
- }
-#else
env->nip = (target_ulong) ka->_sa_handler;
-#endif
/* Signal handlers are entered in big-endian mode. */
env->msr &= ~(1ull << MSR_LE);
sigsegv:
unlock_user_struct(frame, frame_addr, 1);
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
+#endif /* !defined(TARGET_PPC64) */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
target_siginfo_t *info,
sigsegv:
unlock_user_struct(rt_sf, rt_sf_addr, 1);
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
+#if !defined(TARGET_PPC64)
long do_sigreturn(CPUPPCState *env)
{
struct target_sigcontext *sc = NULL;
__get_user(set.sig[1], &sc->_unused[3]);
#endif
target_to_host_sigset_internal(&blocked, &set);
- do_sigprocmask(SIG_SETMASK, &blocked, NULL);
+ set_sigmask(&blocked);
__get_user(sr_addr, &sc->regs);
if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
unlock_user_struct(sr, sr_addr, 1);
unlock_user_struct(sc, sc_addr, 1);
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
+#endif /* !defined(TARGET_PPC64) */
/* See arch/powerpc/kernel/signal_32.c. */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
return 1;
target_to_host_sigset_internal(&blocked, &set);
- do_sigprocmask(SIG_SETMASK, &blocked, NULL);
+ set_sigmask(&blocked);
restore_user_regs(env, mcp, sig);
unlock_user_struct(mcp, mcp_addr, 1);
sigsegv:
unlock_user_struct(rt_sf, rt_sf_addr, 1);
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
#elif defined(TARGET_M68K)
return;
give_sigsegv:
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
give_sigsegv:
unlock_user_struct(frame, frame_addr, 1);
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
}
long do_sigreturn(CPUM68KState *env)
}
target_to_host_sigset_internal(&set, &target_set);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
/* restore registers */
badframe:
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUM68KState *env)
goto badframe;
target_to_host_sigset_internal(&set, &target_set);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
/* restore registers */
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
- return 0;
+ return -TARGET_QEMU_ESIGRETURN;
}
#elif defined(TARGET_ALPHA)
if (err) {
give_sigsegv:
- if (sig == TARGET_SIGSEGV) {
- ka->_sa_handler = TARGET_SIG_DFL;
- }
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
+ return;
}
env->ir[IR_RA] = r26;
if (err) {
give_sigsegv:
- if (sig == TARGET_SIGSEGV) {
- ka->_sa_handler = TARGET_SIG_DFL;
- }
- force_sig(TARGET_SIGSEGV);
+ force_sigsegv(sig);
+ return;
}
env->ir[IR_RA] = r26;
__get_user(target_set.sig[0], &sc->sc_mask);
target_to_host_sigset_internal(&set, &target_set);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
restore_sigcontext(env, sc);
unlock_user_struct(sc, sc_addr, 0);
badframe:
force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
long do_rt_sigreturn(CPUAlphaState *env)
goto badframe;
}
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
restore_sigcontext(env, &frame->uc.tuc_mcontext);
if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
#elif defined(TARGET_TILEGX)
return;
give_sigsegv:
- if (sig == TARGET_SIGSEGV) {
- ka->_sa_handler = TARGET_SIG_DFL;
- }
- force_sig(TARGET_SIGSEGV /* , current */);
+ force_sigsegv(sig);
}
long do_rt_sigreturn(CPUTLGState *env)
goto badframe;
}
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
restore_sigcontext(env, &frame->uc.tuc_mcontext);
if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
}
#else
#endif
-void process_pending_signals(CPUArchState *cpu_env)
+static void handle_pending_signal(CPUArchState *cpu_env, int sig,
+ struct emulated_sigtable *k)
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
- int sig;
abi_ulong handler;
- sigset_t set, old_set;
+ sigset_t set;
target_sigset_t target_old_set;
- struct emulated_sigtable *k;
struct target_sigaction *sa;
- struct sigqueue *q;
TaskState *ts = cpu->opaque;
- if (!ts->signal_pending)
- return;
-
- /* FIXME: This is not threadsafe. */
- k = ts->sigtab;
- for(sig = 1; sig <= TARGET_NSIG; sig++) {
- if (k->pending)
- goto handle_signal;
- k++;
- }
- /* if no signal is pending, just return */
- ts->signal_pending = 0;
- return;
-
- handle_signal:
trace_user_handle_signal(cpu_env, sig);
/* dequeue signal */
- q = k->first;
- k->first = q->next;
- if (!k->first)
- k->pending = 0;
+ k->pending = 0;
sig = gdb_handlesig(cpu, sig);
if (!sig) {
handler = sa->_sa_handler;
}
- if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
- /* Guest has blocked SIGSEGV but we got one anyway. Assume this
- * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
- * because it got a real MMU fault), and treat as if default handler.
- */
- handler = TARGET_SIG_DFL;
+ if (do_strace) {
+ print_taken_signal(sig, &k->info);
}
if (handler == TARGET_SIG_DFL) {
sig != TARGET_SIGURG &&
sig != TARGET_SIGWINCH &&
sig != TARGET_SIGCONT) {
- force_sig(sig);
+ dump_core_and_abort(sig);
}
} else if (handler == TARGET_SIG_IGN) {
/* ignore sig */
} else if (handler == TARGET_SIG_ERR) {
- force_sig(sig);
+ dump_core_and_abort(sig);
} else {
/* compute the blocked signals during the handler execution */
+ sigset_t *blocked_set;
+
target_to_host_sigset(&set, &sa->sa_mask);
/* SA_NODEFER indicates that the current signal should not be
blocked during the handler */
if (!(sa->sa_flags & TARGET_SA_NODEFER))
sigaddset(&set, target_to_host_signal(sig));
- /* block signals in the handler using Linux */
- do_sigprocmask(SIG_BLOCK, &set, &old_set);
/* save the previous blocked signal state to restore it at the
end of the signal execution (see do_sigreturn) */
- host_to_target_sigset_internal(&target_old_set, &old_set);
+ host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
+
+ /* block signals in the handler */
+ blocked_set = ts->in_sigsuspend ?
+ &ts->sigsuspend_mask : &ts->signal_mask;
+ sigorset(&ts->signal_mask, blocked_set, &set);
+ ts->in_sigsuspend = 0;
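+ /* When we were in sigsuspend(), the base mask is the temporary one
+ * installed by sigsuspend (ts->sigsuspend_mask) rather than the saved
+ * process mask; the latter is what target_old_set above restores when
+ * the handler returns.
+ */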
/* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
#endif
/* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
- || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
+ || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
+ || defined(TARGET_PPC64)
/* These targets do not have traditional signals. */
- setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
+ setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
if (sa->sa_flags & TARGET_SA_SIGINFO)
- setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
+ setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
else
setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
- if (sa->sa_flags & TARGET_SA_RESETHAND)
+ if (sa->sa_flags & TARGET_SA_RESETHAND) {
sa->_sa_handler = TARGET_SIG_DFL;
+ }
}
- if (q != &k->info)
- free_sigqueue(cpu_env, q);
+}
+
+void process_pending_signals(CPUArchState *cpu_env)
+{
+ CPUState *cpu = ENV_GET_CPU(cpu_env);
+ int sig;
+ TaskState *ts = cpu->opaque;
+ sigset_t set;
+ sigset_t *blocked_set;
+
+ while (atomic_read(&ts->signal_pending)) {
+ /* FIXME: This is not threadsafe. */
+ sigfillset(&set);
+ sigprocmask(SIG_SETMASK, &set, 0);
+
+ restart_scan:
+ sig = ts->sync_signal.pending;
+ if (sig) {
+ /* Synchronous signals are forced; see force_sig_info() and its
+ * callers in Linux.
+ * Note that not all of our queue_signal() calls in QEMU correspond
+ * to force_sig_info() calls in Linux (some are send_sig_info()).
+ * However it seems like a kernel bug to me to allow the process
+ * to block a synchronous signal since it could then just end up
+ * looping round and round indefinitely.
+ */
+ if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
+ || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
+ sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
+ sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
+ }
+
+ handle_pending_signal(cpu_env, sig, &ts->sync_signal);
+ }
+
+ for (sig = 1; sig <= TARGET_NSIG; sig++) {
+ blocked_set = ts->in_sigsuspend ?
+ &ts->sigsuspend_mask : &ts->signal_mask;
+
+ if (ts->sigtab[sig - 1].pending &&
+ (!sigismember(blocked_set,
+ target_to_host_signal_table[sig]))) {
+ handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
+ /* Restart scan from the beginning, as handle_pending_signal
+ * might have resulted in a new synchronous signal (eg SIGSEGV).
+ */
+ goto restart_scan;
+ }
+ }
+
+ /* if no signal is pending, unblock signals and recheck (the act
+ * of unblocking might cause us to take another host signal which
+ * will set signal_pending again).
+ */
+ atomic_set(&ts->signal_pending, 0);
+ ts->in_sigsuspend = 0;
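+ /* Note that SIGSEGV and SIGBUS are never left blocked at the host
+ * level: guest code may fault at any moment and host_signal_handler()
+ * must be able to run (see the comment there).
+ */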
+ set = ts->signal_mask;
+ sigdelset(&set, SIGSEGV);
+ sigdelset(&set, SIGBUS);
+ sigprocmask(SIG_SETMASK, &set, 0);
+ }
+ ts->in_sigsuspend = 0;
}