 * i386 emulator main execution loop
 * Copyright (c) 2003 Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>

int tb_invalidated_flag;
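/* Note: judging from its use further down, this flag is raised when
   translated blocks are invalidated while a new one is being generated;
   cpu_exec() checks it after cpu_gen_code() and recomputes the virtual pc
   hash chain in that case. */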
//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
    longjmp(env->jmp_env, 1);

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    longjmp(env->jmp_env, 1);
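    /* In other words: undo the handler's signal mask (which sigreturn would
       normally restore) and jump straight back to the setjmp() point in
       cpu_exec(), so execution resumes in the main loop. */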

/* main execution loop */
int cpu_exec(CPUState *env1)
    int saved_T0, saved_T1, saved_T2;
    int saved_i7, tmp_T0;
    int code_gen_size, ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    target_ulong cs_base, pc;

    /* first we save global registers */
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
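    /* Presumably a sparc-host detail: some guest CPU state lives in host
       global/in registers while translated code runs (hence saved_T0/T1/T2
       and i7 above), and it is saved here because longjmp() back into this
       function would not restore those registers. */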

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
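    /* The flags are kept in a lazy form while translated code runs: DF holds
       +1/-1 decoded from bit 10 of eflags, and the arithmetic flags live in
       CC_SRC/CC_OP; the architectural eflags value is only rebuilt with
       cc_table[CC_OP].compute_all(), as done further down for logging and on
       exit. */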
#elif defined(TARGET_ARM)
    env->CF = (psr >> 29) & 1;
    env->NZF = (psr & 0xc0000000) ^ 0x40000000;
    env->VF = (psr << 3) & 0x80000000;
    env->cpsr = psr & ~0xf0000000;
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#error unsupported target CPU

    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    if (setjmp(env->jmp_env) == 0) {
        env->current_tb = NULL;
        /* if an exception is pending, we execute it here */
        if (env->exception_index >= 0) {
            if (env->exception_index >= EXCP_INTERRUPT) {
                /* exit request from the cpu execution loop */
                ret = env->exception_index;
            } else if (env->user_mode_only) {
                /* if user mode only, we simulate a fake exception
                   which will be handled outside the cpu execution
                   loop */
#if defined(TARGET_I386)
                do_interrupt_user(env->exception_index,
                                  env->exception_is_int,
                                  env->exception_next_eip);
                ret = env->exception_index;
#if defined(TARGET_I386)
                /* simulate a real cpu exception. On i386, it can
                   trigger new exceptions, but we do not handle
                   double or triple faults yet. */
                do_interrupt(env->exception_index,
                             env->exception_is_int,
                             env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
#elif defined(TARGET_SPARC)
                do_interrupt(env->exception_index,
                             env->exception_next_pc, 0);
            env->exception_index = -1;

        T0 = 0; /* force lookup of first TB */
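        /* T0 also serves as the "previous TB" handle used for direct block
           chaining (see the tb_add_jump() call below, where its low 2 bits
           select the jump slot to patch); clearing it forces the slow
           hash-table lookup for the first block. */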

        /* g1 can be modified by some libc? functions */

            interrupt_request = env->interrupt_request;
            if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
                /* if hardware interrupt pending, we execute it */
                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    (env->eflags & IF_MASK) &&
                    !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                    intno = cpu_get_pic_interrupt(env);
                    if (loglevel & CPU_LOG_TB_IN_ASM) {
                        fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                    do_interrupt(intno, 0, 0, 0, 1);
                    /* ensure that no TB jump will be modified as
                       the program flow was changed */
#elif defined(TARGET_PPC)
                if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                    env->exception_index = EXCP_EXTERNAL;
                    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                    env->exception_index = EXCP_DECR;
                    env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#elif defined(TARGET_SPARC)
                if (interrupt_request & CPU_INTERRUPT_HARD) {
                    do_interrupt(env->interrupt_index, 0, 0, 0, 0);
                    env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                    //do_interrupt(0, 0, 0, 0, 0);
                    env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                if (interrupt_request & CPU_INTERRUPT_EXITTB) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                    /* ensure that no TB jump will be modified as
                       the program flow was changed */
                if (interrupt_request & CPU_INTERRUPT_EXIT) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                    env->exception_index = EXCP_INTERRUPT;
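                    /* The exit request becomes a pending EXCP_INTERRUPT,
                       which the code at the top of the setjmp() block above
                       treats as a request to leave cpu_exec()
                       (ret = env->exception_index). */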

            if ((loglevel & CPU_LOG_EXEC)) {
#if defined(TARGET_I386)
                /* restore flags in standard format */
                env->regs[R_EAX] = EAX;
                env->regs[R_EBX] = EBX;
                env->regs[R_ECX] = ECX;
                env->regs[R_EDX] = EDX;
                env->regs[R_ESI] = ESI;
                env->regs[R_EDI] = EDI;
                env->regs[R_EBP] = EBP;
                env->regs[R_ESP] = ESP;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                env->cpsr = compute_cpsr();
                cpu_dump_state(env, logfile, fprintf, 0);
                env->cpsr &= ~0xf0000000;
#elif defined(TARGET_SPARC)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                cpu_dump_state(env, logfile, fprintf, 0);
#error unsupported target CPU

            /* we record a subset of the CPU state. It will
               always be the same before a given translated block
               is executed. */
#if defined(TARGET_I386)
            flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            cs_base = env->segs[R_CS].base;
            pc = cs_base + env->eip;
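            /* A block is thus identified by the linear pc (cs_base + eip)
               together with cs_base and the flags subset above, so the same
               eip reached with a different code segment or CPU mode maps to
               a different TB; these are exactly the fields compared in the
               lookup loop below. */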
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#error unsupported CPU

            tb = tb_find(&ptb, pc, cs_base,

                TranslationBlock **ptb1;
                target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

                tb_invalidated_flag = 0;

                regs_to_env(); /* XXX: do it just before cpu_gen_code() */

                /* find translated block using physical mappings */
                phys_pc = get_phys_addr_code(env, pc);
                phys_page1 = phys_pc & TARGET_PAGE_MASK;
                h = tb_phys_hash_func(phys_pc);
                ptb1 = &tb_phys_hash[h];
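                /* the lookup is keyed on the physical address of the code:
                   tb_phys_hash is indexed by a hash of phys_pc, and the loop
                   below walks the collision chain comparing cs_base, flags
                   and, for blocks spanning two pages, the physical address
                   of the second page as well */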
                    tb->page_addr[0] == phys_page1 &&
                    tb->cs_base == cs_base &&
                    tb->flags == flags) {
                    /* check next page if needed */
                    if (tb->page_addr[1] != -1) {
                        virt_page2 = (pc & TARGET_PAGE_MASK) +
                        phys_page2 = get_phys_addr_code(env, virt_page2);
                        if (tb->page_addr[1] == phys_page2)
                    ptb1 = &tb->phys_hash_next;

                /* if no translated code available, then translate it now */
                /* flush must be done */
                /* cannot fail at this point */
                /* don't forget to invalidate previous TB info */
                ptb = &tb_hash[tb_hash_func(pc)];

                tc_ptr = code_gen_ptr;
                tb->cs_base = cs_base;
                cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
                code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
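                /* code_gen_ptr acts as a simple bump allocator for the
                   translation cache: it advances by the generated code size
                   rounded up to CODE_GEN_ALIGN (e.g. with CODE_GEN_ALIGN = 16
                   and code_gen_size = 70 the pointer moves by 80 bytes) */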

                /* check next page if needed */
                virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
                if ((pc & TARGET_PAGE_MASK) != virt_page2) {
                    phys_page2 = get_phys_addr_code(env, virt_page2);
                tb_link_phys(tb, phys_pc, phys_page2);
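                /* tb_link_phys() registers the TB on the physical page(s) it
                   was translated from (two pages when the block crosses a
                   page boundary, as computed just above), so that a later
                   write to those pages can invalidate the stale translation */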

                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    ptb = &tb_hash[tb_hash_func(pc)];
                        ptb = &(*ptb)->hash_next;

                /* we add the TB in the virtual pc hash table */
                tb->hash_next = NULL;
                spin_unlock(&tb_lock);

            if ((loglevel & CPU_LOG_EXEC)) {
                fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                        (long)tb->tc_ptr, tb->pc,
                        lookup_symbol(tb->pc));

            /* see if we can patch the calling TB. */
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                && (tb->cflags & CF_CODE_COPY) ==
                (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
                tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                /* propagates the FP use info */
                ((TranslationBlock *)(T0 & ~3))->cflags |=
                    (tb->cflags & CF_FP_USED);
                spin_unlock(&tb_lock);
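            /* T0 at this point still identifies the previously executed TB
               (with the jump slot number in its low 2 bits), so tb_add_jump()
               above patches that jump to point directly at the new block; on
               later passes the generated code then flows from block to block
               without coming back through this lookup */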

            env->current_tb = tb;
            /* execute the generated code */
            gen_func = (void *)tc_ptr;
#if defined(__sparc__)
            __asm__ __volatile__("call %0\n\t"
                                 : "i0", "i1", "i2", "i3", "i4", "i5");
#elif defined(__arm__)
            asm volatile ("mov pc, %0\n\t"
                          ".global exec_loop\n\t"
                          : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
            if (!(tb->cflags & CF_CODE_COPY)) {
                if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
                    save_native_fp_state(env);
                if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
                    restore_native_fp_state(env);
                /* we work with native eflags */
                CC_SRC = cc_table[CC_OP].compute_all();
                CC_OP = CC_OP_EFLAGS;
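                /* what follows is presumably the "code copy" fast path: the
                   guest registers are loaded into the host registers of the
                   same name while CPUState fields stay reachable through the
                   %fs segment (hence the "fs" prefixes), the block is run,
                   and everything is copied back afterwards */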
482 asm(".globl exec_loop\n"
487 " fs movl %11, %%eax\n"
488 " andl $0x400, %%eax\n"
489 " fs orl %8, %%eax\n"
492 " fs movl %%esp, %12\n"
493 " fs movl %0, %%eax\n"
494 " fs movl %1, %%ecx\n"
495 " fs movl %2, %%edx\n"
496 " fs movl %3, %%ebx\n"
497 " fs movl %4, %%esp\n"
498 " fs movl %5, %%ebp\n"
499 " fs movl %6, %%esi\n"
500 " fs movl %7, %%edi\n"
503 " fs movl %%esp, %4\n"
504 " fs movl %12, %%esp\n"
505 " fs movl %%eax, %0\n"
506 " fs movl %%ecx, %1\n"
507 " fs movl %%edx, %2\n"
508 " fs movl %%ebx, %3\n"
509 " fs movl %%ebp, %5\n"
510 " fs movl %%esi, %6\n"
511 " fs movl %%edi, %7\n"
514 " movl %%eax, %%ecx\n"
515 " andl $0x400, %%ecx\n"
517 " andl $0x8d5, %%eax\n"
518 " fs movl %%eax, %8\n"
520 " subl %%ecx, %%eax\n"
521 " fs movl %%eax, %11\n"
522 " fs movl %9, %%ebx\n" /* get T0 value */
525 : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
526 "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
527 "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
528 "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
529 "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
530 "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
531 "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
532 "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
533 "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
534 "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
536 "m" (*(uint8_t *)offsetof(CPUState, df)),
537 "m" (*(uint8_t *)offsetof(CPUState, saved_esp))

            env->current_tb = NULL;
            /* reset soft MMU for next block (it can currently
               only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
            if (env->hflags & HF_SOFTMMU_MASK) {
                env->hflags &= ~HF_SOFTMMU_MASK;
                /* do not allow linking to another block */
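                /* HF_SOFTMMU_MASK is set from the SIGSEGV path below
                   (handle_cpu_signal) so that the faulting block runs with
                   software MMU accesses; it is cleared again here once that
                   block has executed, and the block is left unchained so the
                   flag cannot leak into other blocks */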

#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#elif defined(TARGET_ARM)
    env->cpsr = compute_cpsr();
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#error unsupported target CPU
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
    /* XXX: cannot enable it yet because it can raise an MMU exception
       where NIP != read address on PowerPC */
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
    CPUX86State *saved_env;

    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
        load_seg(seg_reg, selector);

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
    CPUX86State *saved_env;

    helper_fsave((target_ulong)ptr, data32);

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
    CPUX86State *saved_env;

    helper_frstor((target_ulong)ptr, data32);

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
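    /* summary of the return protocol visible here: 0 means the fault is not
       ours and the caller lets the host deal with it, 1 means it was handled
       without raising a guest fault (the write-protected page was unprotected
       or cpu_x86_handle_mmu_fault dealt with it); the remaining path below
       raises a guest exception and never returns to the signal handler */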
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
           env->eip, env->cr[2], env->error_code);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    raise_exception_err(EXCP0E_PAGE, env->error_code);

    /* activate soft MMU for this block */
    env->hflags |= HF_SOFTMMU_MASK;
    cpu_resume_from_signal(env, puc);

    /* never comes here */

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    do_raise_exception_err(env->exception_index, env->error_code);

    /* activate soft MMU for this block */
    cpu_resume_from_signal(env, puc);

    /* never comes here */

#error unsupported target CPU

#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
    TranslationBlock *tb;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);

int cpu_signal_handler(int host_signum, struct siginfo *info,
    struct ucontext *uc = puc;

#define REG_TRAPNO TRAPNO
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */

/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)

# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, struct siginfo *info,
    struct ucontext *uc = puc;

    if (DSISR_sig(uc) & 0x00800000)
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);

    /* XXX: is there a standard glibc define ? */
    /* XXX: need kernel patch to get write flag faster */
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, struct siginfo *info,
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, struct siginfo *info,
    struct ucontext *uc = puc;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);

#error host CPU specific signal handler needed

#endif /* !defined(CONFIG_SOFTMMU) */