2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #if !defined(CONFIG_SOFTMMU)
35 #include <sys/ucontext.h>
38 int tb_invalidated_flag;
41 //#define DEBUG_SIGNAL
/* Abort execution of the current translated block and longjmp() back to
   the setjmp() point established in cpu_exec().  Never returns.
   NOTE(review): this listing is elided (embedded line numbers jump);
   braces/statements between numbered lines are missing from this view. */
43 void cpu_loop_exit(void)
45 /* NOTE: the registers at this point must be saved by hand because
46 longjmp restores them */
48 longjmp(env->jmp_env, 1);
51 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
55 /* exit the current TB from a signal handler. The host registers are
56 restored in a state compatible with the CPU emulator
58 void cpu_resume_from_signal(CPUState *env1, void *puc)
60 #if !defined(CONFIG_SOFTMMU)
/* puc is the host ucontext passed through from the signal handler;
   only needed in user-mode (non-softmmu) builds to restore the mask. */
61 struct ucontext *uc = puc;
66 /* XXX: restore cpu registers saved in host registers */
68 #if !defined(CONFIG_SOFTMMU)
/* Restore the signal mask that was in effect before the fault, since we
   longjmp() out of the handler instead of returning through sigreturn. */
70 /* XXX: use siglongjmp ? */
71 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
74 longjmp(env->jmp_env, 1);
/* Slow-path TB lookup: search the physical-PC hash chain for a translated
   block matching (pc, cs_base, flags); if none exists, translate the guest
   code now and link the new TB into the physical page lists.
   NOTE(review): listing is elided — loop header, allocation and several
   braces fall in the missing lines. */
78 static TranslationBlock *tb_find_slow(target_ulong pc,
82 TranslationBlock *tb, **ptb1;
85 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
/* Cleared here; set below if translation invalidated existing TBs, so the
   caller (tb_find_fast) knows to recompute its jump-cache entry. */
90 tb_invalidated_flag = 0;
92 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
94 /* find translated block using physical mappings */
95 phys_pc = get_phys_addr_code(env, pc);
96 phys_page1 = phys_pc & TARGET_PAGE_MASK;
98 h = tb_phys_hash_func(phys_pc);
99 ptb1 = &tb_phys_hash[h];
/* Chain walk: a candidate matches only if physical page, cs_base and
   flags all agree ... */
105 tb->page_addr[0] == phys_page1 &&
106 tb->cs_base == cs_base &&
107 tb->flags == flags) {
108 /* ... and, when the TB spans two pages, the second page must also
109 map to the same physical page */
109 if (tb->page_addr[1] != -1) {
110 virt_page2 = (pc & TARGET_PAGE_MASK) +
112 phys_page2 = get_phys_addr_code(env, virt_page2);
113 if (tb->page_addr[1] == phys_page2)
119 ptb1 = &tb->phys_hash_next;
122 /* if no translated code available, then translate it now */
125 /* flush must be done */
127 /* cannot fail at this point */
129 /* don't forget to invalidate previous TB info */
130 tb_invalidated_flag = 1;
132 tc_ptr = code_gen_ptr;
134 tb->cs_base = cs_base;
136 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
/* Bump the code buffer pointer past the generated code, rounded up to
   CODE_GEN_ALIGN. */
137 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
139 /* check next page if needed */
140 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
142 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
143 phys_page2 = get_phys_addr_code(env, virt_page2);
145 tb_link_phys(tb, phys_pc, phys_page2);
148 /* we add the TB in the virtual pc hash table */
149 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
150 spin_unlock(&tb_lock);
/* Fast-path TB lookup: build the per-target (pc, cs_base, flags) key that
   identifies a translated block, probe the virtual-PC jump cache, and fall
   back to tb_find_slow() on a miss or key mismatch. */
154 static inline TranslationBlock *tb_find_fast(void)
156 TranslationBlock *tb;
157 target_ulong cs_base, pc;
160 /* we record a subset of the CPU state. It will
161 always be the same before a given translated block
163 #if defined(TARGET_I386)
165 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
166 flags |= env->intercept;
167 cs_base = env->segs[R_CS].base;
168 pc = cs_base + env->eip;
169 #elif defined(TARGET_ARM)
170 flags = env->thumb | (env->vfp.vec_len << 1)
171 | (env->vfp.vec_stride << 4);
172 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
174 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
176 flags |= (env->condexec_bits << 8);
179 #elif defined(TARGET_SPARC)
180 #ifdef TARGET_SPARC64
181 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
182 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
183 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
185 // FPU enable . Supervisor
186 flags = (env->psref << 4) | env->psrs;
190 #elif defined(TARGET_PPC)
194 #elif defined(TARGET_MIPS)
195 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
197 pc = env->PC[env->current_tc];
198 #elif defined(TARGET_M68K)
199 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
200 | (env->sr & SR_S) /* Bit 13 */
201 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
204 #elif defined(TARGET_SH4)
208 #elif defined(TARGET_ALPHA)
212 #elif defined(TARGET_CRIS)
217 #error unsupported CPU
/* Probe the direct-mapped jump cache; __builtin_expect marks the miss
   path as unlikely. */
219 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
220 if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
221 tb->flags != flags, 0)) {
222 tb = tb_find_slow(pc, cs_base, flags);
223 /* Note: we do it here to avoid a gcc bug on Mac OS X when
224 doing it in tb_find_slow */
225 if (tb_invalidated_flag) {
226 /* as some TBs could have been invalidated because
227 of memory exceptions while generating the code, we
228 must recompute the hash index here */
/* BREAK_CHAIN clears the "next TB" token so that no direct TB-to-TB jump
   is followed after program flow changed (interrupt/exception delivery).
   On sparc hosts (non-Solaris) the token lives in tmp_T0 instead of T0. */
235 #if defined(__sparc__) && !defined(HOST_SOLARIS)
236 #define BREAK_CHAIN tmp_T0 = 0
238 #define BREAK_CHAIN T0 = 0
241 /* main execution loop */
/* Run translated guest code for env1 until an exit condition occurs.
   Returns an exception/exit code (>= EXCP_INTERRUPT values exit the loop).
   Overall shape: save host registers -> set up per-target flag state ->
   setjmp() recovery point -> deliver pending exceptions -> service
   interrupt requests -> find/chain/execute TBs -> restore state on exit.
   NOTE(review): this listing is heavily elided (embedded line numbers
   jump); many braces and statements are missing from this view. */
243 int cpu_exec(CPUState *env1)
245 #define DECLARE_HOST_REGS 1
246 #include "hostregs_helper.h"
247 #if defined(TARGET_SPARC)
248 #if defined(reg_REGWPTR)
249 uint32_t *saved_regwptr;
252 #if defined(__sparc__) && !defined(HOST_SOLARIS)
256 int ret, interrupt_request;
257 void (*gen_func)(void);
258 TranslationBlock *tb;
/* A halted CPU with no wake-up pending has nothing to run. */
261 if (cpu_halted(env1) == EXCP_HALTED)
264 cpu_single_env = env1;
266 /* first we save global registers */
267 #define SAVE_HOST_REGS 1
268 #include "hostregs_helper.h"
270 #if defined(__sparc__) && !defined(HOST_SOLARIS)
271 /* we also save i7 because longjmp may not restore it */
272 asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
/* Per-target conversion of architectural flag state into the internal
   lazy-evaluation format used by the translated code. */
276 #if defined(TARGET_I386)
277 /* put eflags in CPU temporary format */
278 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
279 DF = 1 - (2 * ((env->eflags >> 10) & 1));
280 CC_OP = CC_OP_EFLAGS;
281 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
282 #elif defined(TARGET_SPARC)
283 #if defined(reg_REGWPTR)
284 saved_regwptr = REGWPTR;
286 #elif defined(TARGET_M68K)
287 env->cc_op = CC_OP_FLAGS;
288 env->cc_dest = env->sr & 0xf;
289 env->cc_x = (env->sr >> 4) & 1;
290 #elif defined(TARGET_ALPHA)
291 #elif defined(TARGET_ARM)
292 #elif defined(TARGET_PPC)
293 #elif defined(TARGET_MIPS)
294 #elif defined(TARGET_SH4)
295 #elif defined(TARGET_CRIS)
298 #error unsupported target CPU
300 env->exception_index = -1;
302 /* prepare setjmp context for exception handling */
/* cpu_loop_exit()/cpu_resume_from_signal() longjmp back to this point. */
304 if (setjmp(env->jmp_env) == 0) {
305 env->current_tb = NULL;
306 /* if an exception is pending, we execute it here */
307 if (env->exception_index >= 0) {
308 if (env->exception_index >= EXCP_INTERRUPT) {
309 /* exit request from the cpu execution loop */
310 ret = env->exception_index;
312 } else if (env->user_mode_only) {
313 /* if user mode only, we simulate a fake exception
314 which will be handled outside the cpu execution
316 #if defined(TARGET_I386)
317 do_interrupt_user(env->exception_index,
318 env->exception_is_int,
320 env->exception_next_eip);
322 ret = env->exception_index;
325 #if defined(TARGET_I386)
326 /* simulate a real cpu exception. On i386, it can
327 trigger new exceptions, but we do not handle
328 double or triple faults yet. */
329 do_interrupt(env->exception_index,
330 env->exception_is_int,
332 env->exception_next_eip, 0);
333 /* successfully delivered */
334 env->old_exception = -1;
335 #elif defined(TARGET_PPC)
337 #elif defined(TARGET_MIPS)
339 #elif defined(TARGET_SPARC)
340 do_interrupt(env->exception_index);
341 #elif defined(TARGET_ARM)
343 #elif defined(TARGET_SH4)
345 #elif defined(TARGET_ALPHA)
347 #elif defined(TARGET_CRIS)
349 #elif defined(TARGET_M68K)
353 env->exception_index = -1;
/* kqemu acceleration path: hand execution to the kernel module while no
   interrupt is pending; flags must round-trip through the standard
   eflags format across the call. */
356 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
358 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
359 ret = kqemu_cpu_exec(env);
360 /* put eflags in CPU temporary format */
361 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
362 DF = 1 - (2 * ((env->eflags >> 10) & 1));
363 CC_OP = CC_OP_EFLAGS;
364 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
367 longjmp(env->jmp_env, 1);
368 } else if (ret == 2) {
369 /* softmmu execution needed */
371 if (env->interrupt_request != 0) {
372 /* hardware interrupt will be executed just after */
374 /* otherwise, we restart */
375 longjmp(env->jmp_env, 1);
381 T0 = 0; /* force lookup of first TB */
383 #if defined(__sparc__) && !defined(HOST_SOLARIS)
384 /* g1 can be modified by some libc? functions */
/* ---- per-iteration interrupt servicing, one branch per target ---- */
387 interrupt_request = env->interrupt_request;
388 if (__builtin_expect(interrupt_request, 0)
389 #if defined(TARGET_I386)
390 && env->hflags & HF_GIF_MASK
393 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
394 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
395 env->exception_index = EXCP_DEBUG;
398 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
399 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
400 if (interrupt_request & CPU_INTERRUPT_HALT) {
401 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
403 env->exception_index = EXCP_HLT;
407 #if defined(TARGET_I386)
408 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
409 !(env->hflags & HF_SMM_MASK)) {
410 svm_check_intercept(SVM_EXIT_SMI);
411 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
414 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
415 (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
416 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
418 svm_check_intercept(SVM_EXIT_INTR);
419 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
420 intno = cpu_get_pic_interrupt(env);
421 if (loglevel & CPU_LOG_TB_IN_ASM) {
422 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
424 do_interrupt(intno, 0, 0, 0, 1);
425 /* ensure that no TB jump will be modified as
426 the program flow was changed */
428 #if !defined(CONFIG_USER_ONLY)
429 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
430 (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
432 /* FIXME: this should respect TPR */
433 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
434 svm_check_intercept(SVM_EXIT_VINTR);
/* SVM virtual interrupt: vector comes from the VMCB, and V_IRQ is
   cleared there after delivery. */
435 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
436 if (loglevel & CPU_LOG_TB_IN_ASM)
437 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
438 do_interrupt(intno, 0, 0, -1, 1);
439 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
440 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
444 #elif defined(TARGET_PPC)
446 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
450 if (interrupt_request & CPU_INTERRUPT_HARD) {
451 ppc_hw_interrupt(env);
452 if (env->pending_interrupts == 0)
453 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
456 #elif defined(TARGET_MIPS)
457 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
458 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
459 (env->CP0_Status & (1 << CP0St_IE)) &&
460 !(env->CP0_Status & (1 << CP0St_EXL)) &&
461 !(env->CP0_Status & (1 << CP0St_ERL)) &&
462 !(env->hflags & MIPS_HFLAG_DM)) {
464 env->exception_index = EXCP_EXT_INTERRUPT;
469 #elif defined(TARGET_SPARC)
470 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
472 int pil = env->interrupt_index & 15;
473 int type = env->interrupt_index & 0xf0;
475 if (((type == TT_EXTINT) &&
476 (pil == 15 || pil > env->psrpil)) ||
478 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
479 do_interrupt(env->interrupt_index);
480 env->interrupt_index = 0;
481 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
486 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
487 //do_interrupt(0, 0, 0, 0, 0);
488 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
490 #elif defined(TARGET_ARM)
491 if (interrupt_request & CPU_INTERRUPT_FIQ
492 && !(env->uncached_cpsr & CPSR_F)) {
493 env->exception_index = EXCP_FIQ;
497 /* ARMv7-M interrupt return works by loading a magic value
498 into the PC. On real hardware the load causes the
499 return to occur. The qemu implementation performs the
500 jump normally, then does the exception return when the
501 CPU tries to execute code at the magic address.
502 This will cause the magic PC value to be pushed to
503 the stack if an interrupt occurred at the wrong time.
504 We avoid this by disabling interrupts when
505 pc contains a magic address. */
506 if (interrupt_request & CPU_INTERRUPT_HARD
507 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
508 || !(env->uncached_cpsr & CPSR_I))) {
509 env->exception_index = EXCP_IRQ;
513 #elif defined(TARGET_SH4)
515 #elif defined(TARGET_ALPHA)
516 if (interrupt_request & CPU_INTERRUPT_HARD) {
520 #elif defined(TARGET_CRIS)
521 if (interrupt_request & CPU_INTERRUPT_HARD) {
523 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
526 #elif defined(TARGET_M68K)
527 if (interrupt_request & CPU_INTERRUPT_HARD
528 && ((env->sr & SR_I) >> SR_I_SHIFT)
529 < env->pending_level) {
530 /* Real hardware gets the interrupt vector via an
531 IACK cycle at this point. Current emulated
532 hardware doesn't rely on this, so we
533 provide/save the vector when the interrupt is
535 env->exception_index = env->pending_vector;
540 /* Don't use the cached interrupt_request value,
541 do_interrupt may have updated the EXITTB flag. */
542 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
543 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
544 /* ensure that no TB jump will be modified as
545 the program flow was changed */
548 if (interrupt_request & CPU_INTERRUPT_EXIT) {
549 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
550 env->exception_index = EXCP_INTERRUPT;
/* ---- optional CPU-state tracing before executing the next TB ---- */
555 if ((loglevel & CPU_LOG_TB_CPU)) {
556 /* restore flags in standard format */
558 #if defined(TARGET_I386)
559 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
560 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
561 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
562 #elif defined(TARGET_ARM)
563 cpu_dump_state(env, logfile, fprintf, 0);
564 #elif defined(TARGET_SPARC)
565 REGWPTR = env->regbase + (env->cwp * 16);
566 env->regwptr = REGWPTR;
567 cpu_dump_state(env, logfile, fprintf, 0);
568 #elif defined(TARGET_PPC)
569 cpu_dump_state(env, logfile, fprintf, 0);
570 #elif defined(TARGET_M68K)
571 cpu_m68k_flush_flags(env, env->cc_op);
572 env->cc_op = CC_OP_FLAGS;
573 env->sr = (env->sr & 0xffe0)
574 | env->cc_dest | (env->cc_x << 4);
575 cpu_dump_state(env, logfile, fprintf, 0);
576 #elif defined(TARGET_MIPS)
577 cpu_dump_state(env, logfile, fprintf, 0);
578 #elif defined(TARGET_SH4)
579 cpu_dump_state(env, logfile, fprintf, 0);
580 #elif defined(TARGET_ALPHA)
581 cpu_dump_state(env, logfile, fprintf, 0);
582 #elif defined(TARGET_CRIS)
583 cpu_dump_state(env, logfile, fprintf, 0);
585 #error unsupported target CPU
591 if ((loglevel & CPU_LOG_EXEC)) {
592 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
593 (long)tb->tc_ptr, tb->pc,
594 lookup_symbol(tb->pc));
597 #if defined(__sparc__) && !defined(HOST_SOLARIS)
600 /* see if we can patch the calling TB. When the TB
601 spans two pages, we cannot safely do a direct
606 (env->kqemu_enabled != 2) &&
608 tb->page_addr[1] == -1) {
/* T0 encodes the previous TB pointer (aligned) plus the jump slot index
   in its low 2 bits. */
610 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
611 spin_unlock(&tb_lock);
/* ---- execute the generated host code for this TB ---- */
615 env->current_tb = tb;
616 /* execute the generated code */
617 gen_func = (void *)tc_ptr;
618 #if defined(__sparc__)
619 __asm__ __volatile__("call %0\n\t"
623 : "i0", "i1", "i2", "i3", "i4", "i5",
624 "o0", "o1", "o2", "o3", "o4", "o5",
625 "l0", "l1", "l2", "l3", "l4", "l5",
627 #elif defined(__arm__)
628 asm volatile ("mov pc, %0\n\t"
629 ".global exec_loop\n\t"
633 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
634 #elif defined(__ia64)
/* ia64 calls through a function descriptor {entry, gp}. */
641 fp.gp = code_gen_buffer + 2 * (1 << 20);
642 (*(void (*)(void)) &fp)();
646 env->current_tb = NULL;
647 /* reset soft MMU for next block (it can currently
648 only be set by a memory fault) */
649 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
650 if (env->hflags & HF_SOFTMMU_MASK) {
651 env->hflags &= ~HF_SOFTMMU_MASK;
652 /* do not allow linking to another block */
656 #if defined(USE_KQEMU)
657 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
658 if (kqemu_is_ok(env) &&
659 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
/* ---- loop exited: convert lazy flag state back and restore host ---- */
670 #if defined(TARGET_I386)
671 /* restore flags in standard format */
672 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
673 #elif defined(TARGET_ARM)
674 /* XXX: Save/restore host fpu exception state?. */
675 #elif defined(TARGET_SPARC)
676 #if defined(reg_REGWPTR)
677 REGWPTR = saved_regwptr;
679 #elif defined(TARGET_PPC)
680 #elif defined(TARGET_M68K)
681 cpu_m68k_flush_flags(env, env->cc_op);
682 env->cc_op = CC_OP_FLAGS;
683 env->sr = (env->sr & 0xffe0)
684 | env->cc_dest | (env->cc_x << 4);
685 #elif defined(TARGET_MIPS)
686 #elif defined(TARGET_SH4)
687 #elif defined(TARGET_ALPHA)
688 #elif defined(TARGET_CRIS)
691 #error unsupported target CPU
694 /* restore global registers */
695 #if defined(__sparc__) && !defined(HOST_SOLARIS)
696 asm volatile ("mov %0, %%i7" : : "r" (saved_i7))
698 #include "hostregs_helper.h"
700 /* fail safe : never use cpu_single_env outside cpu_exec() */
701 cpu_single_env = NULL;
705 /* must only be called from the generated code as an exception can be
/* Invalidate all TBs overlapping the guest virtual range [start, end) by
   translating start to a physical address and invalidating the physical
   range. */
707 void tb_invalidate_page_range(target_ulong start, target_ulong end)
709 /* XXX: cannot enable it yet because it yields to MMU exception
710 where NIP != read address on PowerPC */
712 target_ulong phys_addr;
713 phys_addr = get_phys_addr_code(env, start);
714 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
718 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* User-mode helper: load a segment register. In real/VM86 mode the
   descriptor cache is filled directly (base = selector << 4, 64K limit);
   in protected mode the full load_seg() descriptor lookup is used. */
720 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
722 CPUX86State *saved_env;
726 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
728 cpu_x86_load_seg_cache(env, seg_reg, selector,
729 (selector << 4), 0xffff, 0);
731 load_seg(seg_reg, selector);
/* User-mode helper: save the FPU state to guest memory at ptr (16- or
   32-bit layout per data32), temporarily switching the global env. */
736 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
738 CPUX86State *saved_env;
743 helper_fsave(ptr, data32);
/* User-mode helper: restore the FPU state from guest memory at ptr
   (16- or 32-bit layout per data32); mirror of cpu_x86_fsave(). */
748 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
750 CPUX86State *saved_env;
755 helper_frstor(ptr, data32);
760 #endif /* TARGET_I386 */
762 #if !defined(CONFIG_SOFTMMU)
764 #if defined(TARGET_I386)
766 /* 'pc' is the host PC at which the exception was raised. 'address' is
767 the effective address of the memory exception. 'is_write' is 1 if a
768 write caused the exception and otherwise 0. 'old_set' is the
769 signal set which should be restored */
/* i386-target SIGSEGV handler core: returns 0 if the fault is not ours,
   1 if handled transparently; otherwise raises a guest exception and
   never returns (longjmp via raise_exception_err/cpu_resume_from_signal). */
770 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
771 int is_write, sigset_t *old_set,
774 TranslationBlock *tb;
778 env = cpu_single_env; /* XXX: find a correct solution for multithread */
779 #if defined(DEBUG_SIGNAL)
780 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
781 pc, address, is_write, *(unsigned long *)old_set);
783 /* XXX: locking issue */
/* First try: the page may only be write-protected to detect self-
   modifying code. */
784 if (is_write && page_unprotect(h2g(address), pc, puc)) {
788 /* see if it is an MMU fault */
789 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
791 return 0; /* not an MMU fault */
793 return 1; /* the MMU fault was handled without causing real CPU fault */
794 /* now we have a real cpu fault */
797 /* the PC is inside the translated code. It means that we have
798 a virtual CPU fault */
799 cpu_restore_state(tb, env, pc, puc);
803 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
804 env->eip, env->cr[2], env->error_code);
806 /* we restore the process signal mask as the sigreturn should
807 do it (XXX: use sigsetjmp) */
808 sigprocmask(SIG_SETMASK, old_set, NULL);
809 raise_exception_err(env->exception_index, env->error_code);
811 /* activate soft MMU for this block */
812 env->hflags |= HF_SOFTMMU_MASK;
813 cpu_resume_from_signal(env, puc);
815 /* never comes here */
819 #elif defined(TARGET_ARM)
/* ARM-target variant of handle_cpu_signal(): same contract as the i386
   version above, dispatching to cpu_arm_handle_mmu_fault(). */
820 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
821 int is_write, sigset_t *old_set,
824 TranslationBlock *tb;
828 env = cpu_single_env; /* XXX: find a correct solution for multithread */
829 #if defined(DEBUG_SIGNAL)
830 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
831 pc, address, is_write, *(unsigned long *)old_set);
833 /* XXX: locking issue */
834 if (is_write && page_unprotect(h2g(address), pc, puc)) {
837 /* see if it is an MMU fault */
838 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
840 return 0; /* not an MMU fault */
842 return 1; /* the MMU fault was handled without causing real CPU fault */
843 /* now we have a real cpu fault */
846 /* the PC is inside the translated code. It means that we have
847 a virtual CPU fault */
848 cpu_restore_state(tb, env, pc, puc);
850 /* we restore the process signal mask as the sigreturn should
851 do it (XXX: use sigsetjmp) */
852 sigprocmask(SIG_SETMASK, old_set, NULL);
855 #elif defined(TARGET_SPARC)
/* SPARC-target variant of handle_cpu_signal(): same contract as the i386
   version above, dispatching to cpu_sparc_handle_mmu_fault(). */
856 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
857 int is_write, sigset_t *old_set,
860 TranslationBlock *tb;
864 env = cpu_single_env; /* XXX: find a correct solution for multithread */
865 #if defined(DEBUG_SIGNAL)
866 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
867 pc, address, is_write, *(unsigned long *)old_set);
869 /* XXX: locking issue */
870 if (is_write && page_unprotect(h2g(address), pc, puc)) {
873 /* see if it is an MMU fault */
874 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
876 return 0; /* not an MMU fault */
878 return 1; /* the MMU fault was handled without causing real CPU fault */
879 /* now we have a real cpu fault */
882 /* the PC is inside the translated code. It means that we have
883 a virtual CPU fault */
884 cpu_restore_state(tb, env, pc, puc);
886 /* we restore the process signal mask as the sigreturn should
887 do it (XXX: use sigsetjmp) */
888 sigprocmask(SIG_SETMASK, old_set, NULL);
891 #elif defined (TARGET_PPC)
/* PowerPC-target variant of handle_cpu_signal(): same contract as the
   i386 version above, dispatching to cpu_ppc_handle_mmu_fault() and
   raising the guest exception via do_raise_exception_err(). */
892 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
893 int is_write, sigset_t *old_set,
896 TranslationBlock *tb;
900 env = cpu_single_env; /* XXX: find a correct solution for multithread */
901 #if defined(DEBUG_SIGNAL)
902 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
903 pc, address, is_write, *(unsigned long *)old_set);
905 /* XXX: locking issue */
906 if (is_write && page_unprotect(h2g(address), pc, puc)) {
910 /* see if it is an MMU fault */
911 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
913 return 0; /* not an MMU fault */
915 return 1; /* the MMU fault was handled without causing real CPU fault */
917 /* now we have a real cpu fault */
920 /* the PC is inside the translated code. It means that we have
921 a virtual CPU fault */
922 cpu_restore_state(tb, env, pc, puc);
926 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
927 env->nip, env->error_code, tb);
929 /* we restore the process signal mask as the sigreturn should
930 do it (XXX: use sigsetjmp) */
931 sigprocmask(SIG_SETMASK, old_set, NULL);
932 do_raise_exception_err(env->exception_index, env->error_code);
934 /* activate soft MMU for this block */
935 cpu_resume_from_signal(env, puc);
937 /* never comes here */
941 #elif defined(TARGET_M68K)
/* M68K-target variant of handle_cpu_signal(): same contract as the i386
   version above, dispatching to cpu_m68k_handle_mmu_fault().
   NOTE(review): this variant passes the raw host `address` to
   page_unprotect() without the h2g() conversion used by every other
   variant in this file — verify this is intentional. */
942 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
943 int is_write, sigset_t *old_set,
946 TranslationBlock *tb;
950 env = cpu_single_env; /* XXX: find a correct solution for multithread */
951 #if defined(DEBUG_SIGNAL)
952 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
953 pc, address, is_write, *(unsigned long *)old_set);
955 /* XXX: locking issue */
956 if (is_write && page_unprotect(address, pc, puc)) {
959 /* see if it is an MMU fault */
960 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
962 return 0; /* not an MMU fault */
964 return 1; /* the MMU fault was handled without causing real CPU fault */
965 /* now we have a real cpu fault */
968 /* the PC is inside the translated code. It means that we have
969 a virtual CPU fault */
970 cpu_restore_state(tb, env, pc, puc);
972 /* we restore the process signal mask as the sigreturn should
973 do it (XXX: use sigsetjmp) */
974 sigprocmask(SIG_SETMASK, old_set, NULL);
976 /* never comes here */
980 #elif defined (TARGET_MIPS)
/* MIPS-target variant of handle_cpu_signal(): same contract as the i386
   version above, dispatching to cpu_mips_handle_mmu_fault(). */
981 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
982 int is_write, sigset_t *old_set,
985 TranslationBlock *tb;
989 env = cpu_single_env; /* XXX: find a correct solution for multithread */
990 #if defined(DEBUG_SIGNAL)
991 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
992 pc, address, is_write, *(unsigned long *)old_set);
994 /* XXX: locking issue */
995 if (is_write && page_unprotect(h2g(address), pc, puc)) {
999 /* see if it is an MMU fault */
1000 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1002 return 0; /* not an MMU fault */
1004 return 1; /* the MMU fault was handled without causing real CPU fault */
1006 /* now we have a real cpu fault */
1007 tb = tb_find_pc(pc);
1009 /* the PC is inside the translated code. It means that we have
1010 a virtual CPU fault */
1011 cpu_restore_state(tb, env, pc, puc);
1015 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1016 env->PC, env->error_code, tb);
1018 /* we restore the process signal mask as the sigreturn should
1019 do it (XXX: use sigsetjmp) */
1020 sigprocmask(SIG_SETMASK, old_set, NULL);
1021 do_raise_exception_err(env->exception_index, env->error_code);
1023 /* activate soft MMU for this block */
1024 cpu_resume_from_signal(env, puc);
1026 /* never comes here */
1030 #elif defined (TARGET_SH4)
/* SH4-target variant of handle_cpu_signal(): same contract as the i386
   version above, dispatching to cpu_sh4_handle_mmu_fault().
   NOTE(review): the debug printf below reports env->nip, which looks
   copy-pasted from the PowerPC handler — SH4's CPUState presumably has
   no `nip` field; verify against target-sh4/cpu.h. */
1031 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1032 int is_write, sigset_t *old_set,
1035 TranslationBlock *tb;
1039 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1040 #if defined(DEBUG_SIGNAL)
1041 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1042 pc, address, is_write, *(unsigned long *)old_set);
1044 /* XXX: locking issue */
1045 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1049 /* see if it is an MMU fault */
1050 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1052 return 0; /* not an MMU fault */
1054 return 1; /* the MMU fault was handled without causing real CPU fault */
1056 /* now we have a real cpu fault */
1057 tb = tb_find_pc(pc);
1059 /* the PC is inside the translated code. It means that we have
1060 a virtual CPU fault */
1061 cpu_restore_state(tb, env, pc, puc);
1064 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1065 env->nip, env->error_code, tb);
1067 /* we restore the process signal mask as the sigreturn should
1068 do it (XXX: use sigsetjmp) */
1069 sigprocmask(SIG_SETMASK, old_set, NULL);
1071 /* never comes here */
1075 #elif defined (TARGET_ALPHA)
/* Alpha-target variant of handle_cpu_signal(): same contract as the i386
   version above, dispatching to cpu_alpha_handle_mmu_fault().
   NOTE(review): the debug printf below reports env->nip, which looks
   copy-pasted from the PowerPC handler — Alpha's CPUState presumably has
   no `nip` field; verify against target-alpha/cpu.h. */
1076 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1077 int is_write, sigset_t *old_set,
1080 TranslationBlock *tb;
1084 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1085 #if defined(DEBUG_SIGNAL)
1086 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1087 pc, address, is_write, *(unsigned long *)old_set);
1089 /* XXX: locking issue */
1090 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1094 /* see if it is an MMU fault */
1095 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1097 return 0; /* not an MMU fault */
1099 return 1; /* the MMU fault was handled without causing real CPU fault */
1101 /* now we have a real cpu fault */
1102 tb = tb_find_pc(pc);
1104 /* the PC is inside the translated code. It means that we have
1105 a virtual CPU fault */
1106 cpu_restore_state(tb, env, pc, puc);
1109 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1110 env->nip, env->error_code, tb);
1112 /* we restore the process signal mask as the sigreturn should
1113 do it (XXX: use sigsetjmp) */
1114 sigprocmask(SIG_SETMASK, old_set, NULL);
1116 /* never comes here */
1119 #elif defined (TARGET_CRIS)
/* CRIS-target variant of handle_cpu_signal(): same contract as the i386
   version above, dispatching to cpu_cris_handle_mmu_fault().
   NOTE(review): the debug printf below reports env->nip, which looks
   copy-pasted from the PowerPC handler — CRIS's CPUState presumably has
   no `nip` field; verify against target-cris/cpu.h. */
1120 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1121 int is_write, sigset_t *old_set,
1124 TranslationBlock *tb;
1128 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1129 #if defined(DEBUG_SIGNAL)
1130 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1131 pc, address, is_write, *(unsigned long *)old_set);
1133 /* XXX: locking issue */
1134 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1138 /* see if it is an MMU fault */
1139 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1141 return 0; /* not an MMU fault */
1143 return 1; /* the MMU fault was handled without causing real CPU fault */
1145 /* now we have a real cpu fault */
1146 tb = tb_find_pc(pc);
1148 /* the PC is inside the translated code. It means that we have
1149 a virtual CPU fault */
1150 cpu_restore_state(tb, env, pc, puc);
1153 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1154 env->nip, env->error_code, tb);
1156 /* we restore the process signal mask as the sigreturn should
1157 do it (XXX: use sigsetjmp) */
1158 sigprocmask(SIG_SETMASK, old_set, NULL);
1160 /* never comes here */
1165 #error unsupported target CPU
1168 #if defined(__i386__)
1170 #if defined(__APPLE__)
1171 # include <sys/ucontext.h>
1173 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1174 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1175 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1177 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1178 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1179 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
/* i386-host SIGSEGV entry point: extract fault PC, trap number and the
   write bit (bit 1 of the page-fault error code, trap 0xe) from the host
   signal context, then delegate to handle_cpu_signal(). */
1182 int cpu_signal_handler(int host_signum, void *pinfo,
1185 siginfo_t *info = pinfo;
1186 struct ucontext *uc = puc;
1194 #define REG_TRAPNO TRAPNO
1197 trapno = TRAP_sig(uc);
1198 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1200 (ERROR_sig(uc) >> 1) & 1 : 0,
1201 &uc->uc_sigmask, puc);
1204 #elif defined(__x86_64__)
/* x86_64-host SIGSEGV entry point: RIP, trap number and page-fault error
   code come from uc_mcontext.gregs; the write bit is bit 1 of the error
   code when the trap is a page fault (0xe). */
1206 int cpu_signal_handler(int host_signum, void *pinfo,
1209 siginfo_t *info = pinfo;
1210 struct ucontext *uc = puc;
1213 pc = uc->uc_mcontext.gregs[REG_RIP];
1214 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1215 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1216 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1217 &uc->uc_sigmask, puc);
1220 #elif defined(__powerpc__)
1222 /***********************************************************************
1223  * signal context platform-specific definitions
/* Linux/PPC layout: registers live behind uc_mcontext.regs (a pointer to
 * the kernel's pt_regs-style block). */
1227 /* All Registers access - only for local access */
1228 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1229 /* Gpr Registers access */
1230 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1231 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1232 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1233 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1234 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1235 # define LR_sig(context) REG_sig(link, context) /* Link register */
1236 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1237 /* Float Registers access */
/* NOTE(review): 48*4 and (48+32*2)*4 are hard-coded byte offsets into the
 * register save area -- magic numbers tied to a specific kernel layout;
 * verify against the running kernel's pt_regs before reuse. */
1238 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1239 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1240 /* Exception Registers access */
1241 # define DAR_sig(context) REG_sig(dar, context)
1242 # define DSISR_sig(context) REG_sig(dsisr, context)
1243 # define TRAP_sig(context) REG_sig(trap, context)
/* Darwin/PPC layout: uc_mcontext is a pointer to a structure with separate
 * ss (state), fs (float) and es (exception) sub-structures. */
1247 # include <sys/ucontext.h>
1248 typedef struct ucontext SIGCONTEXT;
1249 /* All Registers access - only for local access */
1250 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1251 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1252 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1253 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1254 /* Gpr Registers access */
1255 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1256 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1257 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1258 # define CTR_sig(context) REG_sig(ctr, context)
/* NOTE(review): the two trailing comments below are swapped relative to the
 * Linux block above (XER is the integer exception register, LR the link
 * register) -- the code is right, the comments are misplaced. */
1259 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1260 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1261 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1262 /* Float Registers access */
1263 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1264 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1265 /* Exception Registers access */
1266 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1267 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1268 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1269 #endif /* __APPLE__ */
/* PPC host handler.  NOTE(review): fragmentary listing -- the is_write
 * initialization and intermediate lines are elided between the lines below. */
1271 int cpu_signal_handler(int host_signum, void *pinfo,
1274     siginfo_t *info = pinfo;
1275     struct ucontext *uc = puc;
/* DSISR bit 0x00800000: store operation caused the fault (data access). */
1283     if (DSISR_sig(uc) & 0x00800000)
/* For non-ISI traps (!= 0x400), DSISR bit 0x02000000 also indicates a
 * write-type access -- presumably covers DABR/protection cases; verify
 * against the PowerPC architecture manual. */
1286     if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1289     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1290                              is_write, &uc->uc_sigmask, puc);
1293 #elif defined(__alpha__)
/* ---- Alpha host ---------------------------------------------------------
 * No hardware write flag in the signal context, so the handler decodes the
 * faulting instruction (opcode in bits 31..26) to classify the access.
 * NOTE(review): fragmentary listing; also, uc_mcontext.sc_pc is an integer
 * field -- assigning it straight into a uint32_t * relies on an implicit
 * conversion that modern compilers reject; should be cast explicitly. */
1295 int cpu_signal_handler(int host_signum, void *pinfo,
1298     siginfo_t *info = pinfo;
1299     struct ucontext *uc = puc;
1300     uint32_t *pc = uc->uc_mcontext.sc_pc;
1301     uint32_t insn = *pc;
1304     /* XXX: need kernel patch to get write flag faster */
/* Dispatch on the 6-bit major opcode to decide load vs. store. */
1305     switch (insn >> 26) {
1320     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1321                              is_write, &uc->uc_sigmask, puc);
1323 #elif defined(__sparc__)
/* ---- SPARC host ---------------------------------------------------------
 * The register window and sigmask are recovered from raw offsets past the
 * siginfo_t rather than from a ucontext (NOTE(review): layout assumption
 * specific to the old Linux/SPARC signal frame -- confirm on current
 * kernels).  Like Alpha, the faulting instruction is decoded to detect a
 * store (insn bits 31..30 == 3 selects the load/store format, op3 in bits
 * 24..19 picks the exact operation). */
1325 int cpu_signal_handler(int host_signum, void *pinfo,
1328     siginfo_t *info = pinfo;
1329     uint32_t *regs = (uint32_t *)(info + 1);
1330     void *sigmask = (regs + 20);
1335     /* XXX: is there a standard glibc define ? */
1337     /* XXX: need kernel patch to get write flag faster */
1339     insn = *(uint32_t *)pc;
1340     if ((insn >> 30) == 3) {
1341         switch((insn >> 19) & 0x3f) {
/* Note: last argument (puc) is NULL here -- no ucontext is available. */
1353     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1354                              is_write, sigmask, NULL);
1357 #elif defined(__arm__)
/* ---- ARM host -----------------------------------------------------------
 * PC is general register 15 on ARM.  The write flag is never computed
 * (see XXX below), so every fault is reported as a read.  NOTE(review):
 * fragmentary listing; `struct ucontext` is obsolete on glibc >= 2.26. */
1359 int cpu_signal_handler(int host_signum, void *pinfo,
1362     siginfo_t *info = pinfo;
1363     struct ucontext *uc = puc;
1367     pc = uc->uc_mcontext.gregs[R15];
1368     /* XXX: compute is_write */
1370     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1372                              &uc->uc_sigmask, puc);
1375 #elif defined(__mc68000)
/* ---- m68k host ----------------------------------------------------------
 * gregs[16] is presumably the saved PC slot in the m68k mcontext -- verify
 * against <sys/ucontext.h> for the target libc.  is_write is never
 * computed (XXX below), so all faults are treated as reads. */
1377 int cpu_signal_handler(int host_signum, void *pinfo,
1380     siginfo_t *info = pinfo;
1381     struct ucontext *uc = puc;
1385     pc = uc->uc_mcontext.gregs[16];
1386     /* XXX: compute is_write */
1388     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1390                              &uc->uc_sigmask, puc);
1393 #elif defined(__ia64)
/* ---- IA-64 host ---------------------------------------------------------
 * IA-64 delivers the access type in the Interruption Status Register (ISR)
 * carried in siginfo; bit 33 (ISR.w) is set for write accesses.  The
 * __ISR_VALID flag in si_segvflags says whether si_isr holds valid data. */
1396 /* This ought to be in <bits/siginfo.h>... */
1397 # define __ISR_VALID 1
1400 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1402     siginfo_t *info = pinfo;
1403     struct ucontext *uc = puc;
1407     ip = uc->uc_mcontext.sc_ip;
/* Only SEGV-class signals carry a meaningful ISR; dispatch on the signal. */
1408     switch (host_signum) {
1414         if (info->si_code && (info->si_segvflags & __ISR_VALID))
1415             /* ISR.W (write-access) is bit 33: */
1416             is_write = (info->si_isr >> 33) & 1;
1422     return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1424                              &uc->uc_sigmask, puc);
1427 #elif defined(__s390__)
/* ---- s390 host ----------------------------------------------------------
 * PC comes from the saved PSW (program status word) address field.
 * is_write is never computed (XXX below), so all faults are treated as
 * reads.  NOTE(review): fragmentary listing. */
1429 int cpu_signal_handler(int host_signum, void *pinfo,
1432     siginfo_t *info = pinfo;
1433     struct ucontext *uc = puc;
1437     pc = uc->uc_mcontext.psw.addr;
1438     /* XXX: compute is_write */
1440     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1441                              is_write, &uc->uc_sigmask, puc);
1444 #elif defined(__mips__)
/* ---- MIPS host ----------------------------------------------------------
 * PC is read directly from uc_mcontext.pc (greg_t).  is_write is never
 * computed (XXX below), so all faults are treated as reads. */
1446 int cpu_signal_handler(int host_signum, void *pinfo,
1449     siginfo_t *info = pinfo;
1450     struct ucontext *uc = puc;
1451     greg_t pc = uc->uc_mcontext.pc;
1454     /* XXX: compute is_write */
1456     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1457                              is_write, &uc->uc_sigmask, puc);
/* Final #else arm of the host-CPU dispatch: fail the build loudly rather
 * than silently miscompiling on an unsupported host architecture. */
1462 #error host CPU specific signal handler needed
/* Closes the !CONFIG_SOFTMMU region opened near the top of the file. */
1466 #endif /* !defined(CONFIG_SOFTMMU) */