/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif
int tb_invalidated_flag;
static unsigned long next_tb;
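/* next_tb encodes both the TB we are coming from and which of its two
   direct-jump slots we left through: the low 2 bits are the jump slot
   index and the remaining bits are the TranslationBlock pointer (see
   the tb_add_jump() call in cpu_exec() below).  0 means "no chaining
   possible", forcing a fresh lookup. */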
//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)
#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;
    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif
    env = env1;
    /* XXX: restore cpu registers saved in host registers */
#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
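/* Note: since we longjmp back into cpu_exec() instead of returning
   through the kernel's sigreturn, the signal mask blocked on handler
   entry would otherwise stay blocked forever; that is why it is
   restored by hand from the saved ucontext above. */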
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
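/* TB lookup is two-level: tb_find_fast() below checks the per-virtual-pc
   jump cache (env->tb_jmp_cache) and only falls back to tb_find_slow(),
   which searches the physical hash table and translates a new block on
   a complete miss. */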
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
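    /* Note: bit 10 of EFLAGS is the direction flag; the formula above
       maps DF=0 to +1 and DF=1 to -1, so the string instruction
       helpers can step their index registers by simply adding DF. */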
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
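    /* Exceptions raised while translated code runs (guest faults, exit
       requests) go through cpu_loop_exit(), which longjmps back to the
       setjmp() below; the pending exception is then delivered at the
       top of the outer loop. */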
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
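                /* Inner loop: deliver any pending hardware interrupt,
                   look up (or translate) the next TB, try to chain it
                   directly to the previous one, then run it. */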
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && !(env->singlestep_enabled & SSTEP_NOIRQ)) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                        !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
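                /* Note: env->current_tb is used by cpu_interrupt() to
                   detect that a TB is executing right now and must be
                   unlinked and forced back into this loop; clear it as
                   soon as the generated code returns. */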
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) forever */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as
   an exception can be generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
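/* The helpers below temporarily install the caller's CPUX86State as the
   global 'env' (the helper_* and load_seg routines implicitly operate
   on 'env') and restore the previous value before returning. */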
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_fsave(ptr, data32);
    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_frstor(ptr, data32);
    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the signal
   set which should be restored. Returns 1 if the fault was handled by
   the emulator (or never returns, when the fault is re-raised as a
   guest exception), and 0 if the signal must be passed to the host. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
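/* The per-target variants below all follow the same pattern as the i386
   version above; only the cpu_*_handle_mmu_fault() call and the way a
   real guest fault is re-raised differ. */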
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
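/* The handlers below are the host-side entry points: for each host
   architecture, cpu_signal_handler() digs the faulting PC (and, where
   the host makes it available, the read/write direction) out of the
   signal frame and forwards them to handle_cpu_signal() above. */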
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
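/* Note: trap 0xe is the x86 page-fault exception, and bit 1 of its
   error code is set when the faulting access was a write; hence the
   (ERROR_sig(uc) >> 1) & 1 computation of is_write above. */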
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)              REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context)  /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>

typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
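/* Note: on a PowerPC data storage interrupt, DSISR bit 0x02000000 is
   set when the faulting access was a store; trap 0x400 is the
   instruction storage interrupt, for which DSISR does not carry that
   meaning, hence the TRAP_sig() check above. */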
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID    1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */