#include "exec/cpu_ldst.h"
#include "translate-all.h"
#include "exec/helper-proto.h"
+#include "qemu/atomic128.h"
#undef EAX
#undef ECX
the effective address of the memory exception. 'is_write' is 1 if a
write caused the exception and otherwise 0. 'old_set' is the
signal set which should be restored */
-static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
+static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
int is_write, sigset_t *old_set)
{
CPUState *cpu = current_cpu;
CPUClass *cc;
int ret;
+ unsigned long address = (unsigned long)info->si_addr;
/* We must handle PC addresses from two different sources:
* a call return address and a signal frame address.
pc, address, is_write, *(unsigned long *)old_set);
#endif
/* XXX: locking issue */
- if (is_write && h2g_valid(address)) {
+ /* Note that it is important that we don't call page_unprotect() unless
+ * this is really a "write to nonwriteable page" fault, because
+ * page_unprotect() assumes that if it is called for an access to
+ * a page that's writeable, this means we had two threads racing and
+ * another thread got there first and already made the page writeable;
+ * so we will retry the access. If we were to call page_unprotect()
+ * for some other kind of fault that should really be passed to the
+ * guest, we'd end up in an infinite loop of retrying the faulting
+ * access.
+ */
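+ /* SEGV_ACCERR means the page is mapped but the permissions forbid
+ * the access (SEGV_MAPERR would mean an unmapped address), which is
+ * the only kind of SIGSEGV that page_unprotect() can fix up.
+ */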
+ if (is_write && info->si_signo == SIGSEGV && info->si_code == SEGV_ACCERR &&
+ h2g_valid(address)) {
switch (page_unprotect(h2g(address), pc)) {
case 0:
/* Fault not caused by a page marked unwritable to protect
cc = CPU_GET_CLASS(cpu);
/* see if it is an MMU fault */
g_assert(cc->handle_mmu_fault);
- ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX);
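+ /* The access size (third argument) is not known here, so pass 0. */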
+ ret = cc->handle_mmu_fault(cpu, address, 0, is_write, MMU_USER_IDX);
if (ret == 0) {
/* The MMU fault was handled without causing real CPU fault.
}
/* Now we have a real cpu fault. */
- cpu_restore_state(cpu, pc);
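+ /* Pass 'true' since we are not returning to the faulting TB but
+ * exiting via cpu_loop_exit() below.
+ */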
+ cpu_restore_state(cpu, pc, true);
sigprocmask(SIG_SETMASK, old_set, NULL);
cpu_loop_exit(cpu);
#endif
pc = EIP_sig(uc);
trapno = TRAP_sig(uc);
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- trapno == 0xe ?
- (ERROR_sig(uc) >> 1) & 1 : 0,
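+ /* Trap 0xe is a page fault; bit 1 of the page-fault error code is
+ * set when the faulting access was a write.
+ */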
+ return handle_cpu_signal(pc, info,
+ trapno == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
&MASK_sig(uc));
}
#endif
pc = PC_sig(uc);
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- TRAP_sig(uc) == 0xe ?
- (ERROR_sig(uc) >> 1) & 1 : 0,
+ return handle_cpu_signal(pc, info,
+ TRAP_sig(uc) == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0,
&MASK_sig(uc));
}
is_write = 1;
}
#endif
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- is_write, &uc->uc_sigmask);
+ return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#elif defined(__alpha__)
is_write = 1;
}
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- is_write, &uc->uc_sigmask);
+ return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#elif defined(__sparc__)
break;
}
}
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- is_write, sigmask);
+ return handle_cpu_signal(pc, info, is_write, sigmask);
}
#elif defined(__arm__)
* later processor; on v5 we will always report this as a read).
*/
is_write = extract32(uc->uc_mcontext.error_code, 11, 1);
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- is_write,
- &uc->uc_sigmask);
+ return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#elif defined(__aarch64__)
/* Ignore bits 23 & 24, controlling indexing. */
|| (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */
- return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
- is_write, &uc->uc_sigmask);
-}
-
-#elif defined(__ia64)
-
-#ifndef __ISR_VALID
- /* This ought to be in <bits/siginfo.h>... */
-# define __ISR_VALID 1
-#endif
-
-int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
-{
- siginfo_t *info = pinfo;
- ucontext_t *uc = puc;
- unsigned long ip;
- int is_write = 0;
-
- ip = uc->uc_mcontext.sc_ip;
- switch (host_signum) {
- case SIGILL:
- case SIGFPE:
- case SIGSEGV:
- case SIGBUS:
- case SIGTRAP:
- if (info->si_code && (info->si_segvflags & __ISR_VALID)) {
- /* ISR.W (write-access) is bit 33: */
- is_write = (info->si_isr >> 33) & 1;
- }
- break;
-
- default:
- break;
- }
- return handle_cpu_signal(ip, (unsigned long)info->si_addr,
- is_write,
- (sigset_t *)&uc->uc_sigmask);
+ return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#elif defined(__s390__)
}
break;
}
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- is_write, &uc->uc_sigmask);
+ return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#elif defined(__mips__)
/* XXX: compute is_write */
is_write = 0;
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- is_write, &uc->uc_sigmask);
+ return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
+}
+
+#elif defined(__riscv)
+
+int cpu_signal_handler(int host_signum, void *pinfo,
+ void *puc)
+{
+ siginfo_t *info = pinfo;
+ ucontext_t *uc = puc;
+ greg_t pc = uc->uc_mcontext.__gregs[REG_PC];
+ uint32_t insn = *(uint32_t *)pc;
+ int is_write = 0;
+
+ /* Detect store by reading the instruction at the program
+ counter. Note: we currently only generate 32-bit
+ instructions, so we only detect 32-bit stores */
+ switch (insn & 0b11) {
+ case 3:
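+ /* 32-bit instruction: insn[6:2] is the major opcode
+ * (8 = STORE, 9 = STORE-FP) and insn[14:12] is funct3,
+ * which selects the store width.
+ */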
+ switch (((insn >> 2) & 0b11111)) {
+ case 8:
+ switch (((insn >> 12) & 0b111)) {
+ case 0: /* sb */
+ case 1: /* sh */
+ case 2: /* sw */
+ case 3: /* sd */
+ case 4: /* sq */
+ is_write = 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 9:
+ switch (((insn >> 12) & 0b111)) {
+ case 2: /* fsw */
+ case 3: /* fsd */
+ case 4: /* fsq */
+ is_write = 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check for compressed instructions */
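+ /* insn[1:0] is the quadrant (0, 1 or 2; 3 means not compressed)
+ * and insn[15:13] is the compressed funct3 field.
+ */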
+ switch (((insn >> 13) & 0b111)) {
+ case 7:
+ switch (insn & 0b11) {
+ case 0: /* c.sd */
+ case 2: /* c.sdsp */
+ is_write = 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (insn & 0b11) {
+ case 0: /* c.sw */
+ case 2: /* c.swsp */
+ is_write = 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}
#else
}
/* Macro to call the above, with local variables from the use context. */
+#define ATOMIC_MMU_DECLS do {} while (0)
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
#define ATOMIC_MMU_CLEANUP do { helper_retaddr = 0; } while (0)
/* The following is only callable from other helpers, and matches up
with the softmmu version. */
-#ifdef CONFIG_ATOMIC128
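+/* These 16-byte helpers are usable whenever the host has either a real
+   16-byte atomic access (HAVE_ATOMIC128) or a 16-byte compare-and-swap
+   (HAVE_CMPXCHG128); qemu/atomic128.h supplies the primitives for both. */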
+#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#define DATA_SIZE 16
#include "atomic_template.h"
-#endif /* CONFIG_ATOMIC128 */
+#endif /* HAVE_ATOMIC128 || HAVE_CMPXCHG128 */