 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#define CPU_NO_GLOBAL_REGS

#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;
static unsigned long next_tb;

//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
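
/* The wrappers below exist so that a plain setjmp()/longjmp() from the
   affected glibc versions never sees live values in the global
   registers: SAVE_GLOBALS() spills env and %i7 to memory first, and
   RESTORE_GLOBALS() reloads them afterwards. */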
static int sparc_setjmp(jmp_buf buf)
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)
static void sparc_longjmp(jmp_buf buf, int val)
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
void cpu_loop_exit(void)
    /* NOTE: the registers at this point must be saved by hand because
       longjmp will restore them */
    longjmp(env->jmp_env, 1);
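
/* A minimal usage sketch (the exception number is illustrative):
 *
 *     env->exception_index = EXCP_INTERRUPT;
 *     cpu_loop_exit();
 *
 * control then reappears at the setjmp(env->jmp_env) in cpu_exec(),
 * which dispatches on env->exception_index. */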
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    longjmp(env->jmp_env, 1);
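
/* In user mode this pairs with handle_cpu_signal() below: the host
   SIGSEGV handler either fixes the page up or queues a guest
   exception, then jumps back to cpu_exec() through this path instead
   of returning through the kernel's sigreturn. */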
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
    TranslationBlock *tb, **ptb1;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
        ptb1 = &tb->phys_hash_next;
    /* if no translated code available, then translate it now */

    /* flush must be done */
    /* cannot fail at this point */
    /* don't forget to invalidate previous TB info */
    tb_invalidated_flag = 1;

    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);

    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
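
/* TB lookup is two-level: tb_find_fast() first probes the
   direct-mapped, virtual-pc-indexed tb_jmp_cache; on a miss it falls
   back to tb_find_slow() above, which walks the physical-address hash
   chain and translates a new block if nothing matches.  The fast-path
   probe is essentially (same helpers as below):

       tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
       if (!tb || tb->pc != pc || tb->cs_base != cs_base ||
           tb->flags != flags)
           tb = tb_find_slow(pc, cs_base, flags);
*/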
static inline TranslationBlock *tb_find_fast(void)
    TranslationBlock *tb;
    target_ulong cs_base, pc;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
    flags |= (env->condexec_bits << 8);
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
            | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
#else
#error unsupported CPU
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
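            /* (a TB freed during code generation may be the one a
               pending chain request points at, so the chain recorded
               in next_tb cannot be trusted past this point) */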
/* main execution loop */
int cpu_exec(CPUState *env1)
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
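    /* Condition codes are tracked lazily: CC_SRC plus the CC_OP
       selector are enough to recompute the arithmetic flags on demand,
       so the architectural eflags value is reassembled only when
       needed, as in:

           env->eflags |= cc_table[CC_OP].compute_all() | (DF & DF_MASK);

       (the same recipe used before the kqemu call and the register
       dumps below). */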
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    if (setjmp(env->jmp_env) == 0) {
        env->current_tb = NULL;
        /* if an exception is pending, we execute it here */
        if (env->exception_index >= 0) {
            if (env->exception_index >= EXCP_INTERRUPT) {
                /* exit request from the cpu execution loop */
                ret = env->exception_index;
            } else if (env->user_mode_only) {
                /* if user mode only, we simulate a fake exception
                   which will be handled outside the cpu execution
                   loop */
#if defined(TARGET_I386)
                do_interrupt_user(env->exception_index,
                                  env->exception_is_int,
                                  env->error_code,
                                  env->exception_next_eip);
                /* successfully delivered */
                env->old_exception = -1;
                ret = env->exception_index;
            } else {
#if defined(TARGET_I386)
                /* simulate a real cpu exception. On i386, it can
                   trigger new exceptions, but we do not handle
                   double or triple faults yet. */
                do_interrupt(env->exception_index,
                             env->exception_is_int,
                             env->error_code,
                             env->exception_next_eip, 0);
                /* successfully delivered */
                env->old_exception = -1;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
                do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_M68K)
#endif
            env->exception_index = -1;
        if (kqemu_is_ok(env) && env->interrupt_request == 0) {
            env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
            ret = kqemu_cpu_exec(env);
            /* put eflags in CPU temporary format */
            CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
            DF = 1 - (2 * ((env->eflags >> 10) & 1));
            CC_OP = CC_OP_EFLAGS;
            env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                longjmp(env->jmp_env, 1);
            } else if (ret == 2) {
                /* softmmu execution needed */
                if (env->interrupt_request != 0) {
                    /* hardware interrupt will be executed just after */
                } else {
                    /* otherwise, we restart */
                    longjmp(env->jmp_env, 1);

        next_tb = 0; /* force lookup of first TB */
            interrupt_request = env->interrupt_request;
            if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                && env->hflags & HF_GIF_MASK
#endif
                && !(env->singlestep_enabled & SSTEP_NOIRQ)) {
                if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                    env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                    env->exception_index = EXCP_DEBUG;
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                if (interrupt_request & CPU_INTERRUPT_HALT) {
                    env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                    env->exception_index = EXCP_HLT;
#if defined(TARGET_I386)
                if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                    !(env->hflags & HF_SMM_MASK)) {
                    svm_check_intercept(SVM_EXIT_SMI);
                    env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                           !(env->hflags & HF_NMI_MASK)) {
                    env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                    env->hflags |= HF_NMI_MASK;
                    do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                           (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                           !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                    svm_check_intercept(SVM_EXIT_INTR);
                    env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                    intno = cpu_get_pic_interrupt(env);
                    if (loglevel & CPU_LOG_TB_IN_ASM) {
                        fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                    do_interrupt(intno, 0, 0, 0, 1);
                    /* ensure that no TB jump will be modified as
                       the program flow was changed */
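                    /* discarding the pending chain request (next_tb)
                       here forces a fresh TB lookup, exactly like the
                       "force lookup of first TB" reset above */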
#if !defined(CONFIG_USER_ONLY)
                } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                           (env->eflags & IF_MASK) &&
                           !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                    /* FIXME: this should respect TPR */
                    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                    svm_check_intercept(SVM_EXIT_VINTR);
                    intno = ldl_phys(env->vm_vmcb +
                                     offsetof(struct vmcb, control.int_vector));
                    if (loglevel & CPU_LOG_TB_IN_ASM)
                        fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                    do_interrupt(intno, 0, 0, -1, 1);
                    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) &
                             ~V_IRQ_MASK);
#elif defined(TARGET_PPC)
                if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                if (interrupt_request & CPU_INTERRUPT_HARD) {
                    ppc_hw_interrupt(env);
                    if (env->pending_interrupts == 0)
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#elif defined(TARGET_MIPS)
                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                    (env->CP0_Status & (1 << CP0St_IE)) &&
                    !(env->CP0_Status & (1 << CP0St_EXL)) &&
                    !(env->CP0_Status & (1 << CP0St_ERL)) &&
                    !(env->hflags & MIPS_HFLAG_DM)) {
                    env->exception_index = EXCP_EXT_INTERRUPT;
#elif defined(TARGET_SPARC)
                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    int pil = env->interrupt_index & 15;
                    int type = env->interrupt_index & 0xf0;
                    if (((type == TT_EXTINT) &&
                         (pil == 15 || pil > env->psrpil)) ||
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        do_interrupt(env->interrupt_index);
                        env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                    //do_interrupt(0, 0, 0, 0, 0);
                    env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#elif defined(TARGET_ARM)
                if (interrupt_request & CPU_INTERRUPT_FIQ
                    && !(env->uncached_cpsr & CPSR_F)) {
                    env->exception_index = EXCP_FIQ;
                /* ARMv7-M interrupt return works by loading a magic value
                   into the PC.  On real hardware the load causes the
                   return to occur.  The qemu implementation performs the
                   jump normally, then does the exception return when the
                   CPU tries to execute code at the magic address.
                   This will cause the magic PC value to be pushed to
                   the stack if an interrupt occurred at the wrong time.
                   We avoid this by disabling interrupts when
                   pc contains a magic address.  */
                if (interrupt_request & CPU_INTERRUPT_HARD
                    && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                        || !(env->uncached_cpsr & CPSR_I))) {
                    env->exception_index = EXCP_IRQ;
#elif defined(TARGET_SH4)
                if (interrupt_request & CPU_INTERRUPT_HARD) {
#elif defined(TARGET_ALPHA)
                if (interrupt_request & CPU_INTERRUPT_HARD) {
#elif defined(TARGET_CRIS)
                if (interrupt_request & CPU_INTERRUPT_HARD) {
#elif defined(TARGET_M68K)
                if (interrupt_request & CPU_INTERRUPT_HARD
                    && ((env->sr & SR_I) >> SR_I_SHIFT)
                        < env->pending_level) {
                    /* Real hardware gets the interrupt vector via an
                       IACK cycle at this point.  Current emulated
                       hardware doesn't rely on this, so we
                       provide/save the vector when the interrupt is
                       first signalled.  */
                    env->exception_index = env->pending_vector;
                /* Don't use the cached interrupt_request value,
                   do_interrupt may have updated the EXITTB flag. */
                if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                    /* ensure that no TB jump will be modified as
                       the program flow was changed */
                if (interrupt_request & CPU_INTERRUPT_EXIT) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                    env->exception_index = EXCP_INTERRUPT;
            if ((loglevel & CPU_LOG_TB_CPU)) {
                /* restore flags in standard format */
#if defined(TARGET_I386)
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                REGWPTR = env->regbase + (env->cwp * 16);
                env->regwptr = REGWPTR;
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                cpu_m68k_flush_flags(env, env->cc_op);
                env->cc_op = CC_OP_FLAGS;
                env->sr = (env->sr & 0xffe0)
                          | env->cc_dest | (env->cc_x << 4);
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif

            if ((loglevel & CPU_LOG_EXEC)) {
                fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                        (long)tb->tc_ptr, tb->pc,
                        lookup_symbol(tb->pc));
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                    (env->kqemu_enabled != 2) &&
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    spin_unlock(&tb_lock);
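                /* next_tb, as returned by tcg_qemu_tb_exec() below,
                   carries the exiting TB's address with the index of
                   the jump slot it left through packed into the two
                   low bits -- hence the (next_tb & ~3) pointer and
                   (next_tb & 3) slot split in tb_add_jump() above. */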
                env->current_tb = tb;
                /* execute the generated code */
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
    /* XXX: cannot enable it yet because it yields MMU exceptions
       where NIP != read address on PowerPC */
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
    CPUX86State *saved_env;

    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
    CPUX86State *saved_env;

    helper_fsave(ptr, data32);

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
    CPUX86State *saved_env;

    helper_frstor(ptr, data32);

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
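/* Return value contract (mirrored by every per-target copy below):
   0 means the fault is not ours and the caller should let the host
   deal with it; 1 means it was fixed up transparently; if a guest
   exception has to be raised, the function does not return at all
   (raise_exception_err() / cpu_resume_from_signal()). */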
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
           env->eip, env->cr[2], env->error_code);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    raise_exception_err(env->exception_index, env->error_code);
    /* activate soft MMU for this block */
    env->hflags |= HF_SOFTMMU_MASK;
    cpu_resume_from_signal(env, puc);
    /* never comes here */
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    do_raise_exception_err(env->exception_index, env->error_code);
    /* activate soft MMU for this block */
    cpu_resume_from_signal(env, puc);
    /* never comes here */
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
           env->PC, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    do_raise_exception_err(env->exception_index, env->error_code);
    /* activate soft MMU for this block */
    cpu_resume_from_signal(env, puc);
    /* never comes here */
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

#define REG_TRAPNO TRAPNO
    trapno = TRAP_sig(uc);
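    /* for trap 0xe (page fault) bit 1 of the error code is the W bit,
       so is_write can be recovered as ((ERROR_sig(uc) >> 1) & 1);
       other traps are treated as reads */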
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */

/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)             (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)          ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)              REG_sig(r##reg_num, context)
# define IAR_sig(context)                       REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                       REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                       REG_sig(ctr, context)
# define XER_sig(context)                       REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                        REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                        REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)            FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                     ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                       EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)                     EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                      EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    if (DSISR_sig(uc) & 0x00800000)
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
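    /* both tests read a DSISR "store operation" indication out of the
       signal context; that single bit is all that is needed here to
       derive is_write for the fault */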
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
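    /* the major opcode in bits 31:26 identifies the instruction
       format; the cases of this switch match the store opcodes
       (stb/stw/stl/stq and their locked and floating-point variants)
       and set is_write = 1, everything else being treated as a read */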
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);

    /* XXX: is there a standard glibc define ? */
    /* XXX: need kernel patch to get write flag faster */
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
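        /* op = 3 in bits 31:30 marks a format-3 memory instruction;
           op3 in bits 24:19 then separates loads from stores, and the
           store encodings set is_write = 1 */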
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;

    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;

    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */