[qemu.git] / cpu-exec.c
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
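
/* cpu_exec_nocache() backs the instruction-count (icount) path in
   cpu_exec() below: when fewer instructions may run than the cached TB
   contains, a throw-away TB capped at max_cycles instructions is
   generated, executed once, and immediately invalidated and freed so it
   never lands in the translation caches. */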

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
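
/* tb_find_slow() keys the lookup on the physical code address: TBs hang
   off tb_phys_hash[] at tb_phys_hash_func(phys_pc), and a match requires
   pc, cs_base, flags and, for a TB spanning two pages, the physical
   address of the second page as well.  On a miss the block is translated
   with tb_gen_code() and then cached in the virtual-pc indexed
   env->tb_jmp_cache[] used by the fast path below. */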

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
        | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
        | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
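
/* The (pc, cs_base, flags) triple computed above is the complete lookup
   key for a translated block: flags packs the per-target mode bits that
   influence code generation (for example hflags/IOPL/VM86 state on i386,
   Thumb and VFP state on ARM), so a cached TB is only reused when the CPU
   is in exactly the same translation-relevant state.  env->tb_jmp_cache[]
   is a direct-mapped cache indexed by virtual pc; only on a miss or
   mismatch does tb_find_slow() walk the physical hash table. */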

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
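
                /* Every target above follows the same pattern: check the
                   relevant bits of interrupt_request, deliver the event
                   through the target's do_interrupt(), and clear next_tb
                   so the previously executed TB is not chained to the
                   next one now that control flow has changed
                   asynchronously. */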
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
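
                /* Direct block chaining: the two low bits of next_tb
                   identify which jump slot of the previously executed TB
                   led here, and tb_add_jump() patches that slot to branch
                   straight to the new TB so the main loop is skipped the
                   next time.  Chaining is skipped when next_tb is 0 or
                   when the new TB spans two pages, where a direct jump is
                   not safe (see the comment above). */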
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
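
                /* tcg_qemu_tb_exec() returns the address of the last TB
                   executed with status folded into the two low bits:
                   0 and 1 identify the jump slot taken (used for chaining
                   above), while 2 means the icount decrementer expired
                   mid-block.  In that case the decrementer is either
                   refilled from icount_extra and execution continues, or
                   the remaining instructions are run uncached through
                   cpu_exec_nocache() before exiting with EXCP_INTERRUPT. */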
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
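
/* cpu_exec() returns the exception index that stopped execution
   (EXCP_HALTED when the CPU was already halted on entry, EXCP_INTERRUPT
   for an exit request, EXCP_DEBUG for a debug event, or a pending target
   exception number in user-mode-only emulation).  Exceptional exits from
   generated code unwind back to the setjmp() at the top of the outer loop
   via cpu_loop_exit()/longjmp(). */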

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
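
/* Each target's handle_cpu_signal() below follows the same protocol: the
   cpu_*_handle_mmu_fault() helper returns < 0 when the access is not a
   guest MMU fault (the signal is then left to the caller), 0 when the
   fault was resolved internally so execution can simply resume, and 1 for
   a genuine guest fault, in which case the CPU state is rolled back with
   cpu_restore_state() and the guest exception is raised. */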

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

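/* The host-side cpu_signal_handler() variants below differ only in how
   they dig the faulting PC (and, where the hardware reports it, whether
   the access was a write) out of the host's signal ucontext; everything
   else is delegated to handle_cpu_signal() above.  On x86 hosts the
   write flag comes from bit 1 of the page-fault error code when the trap
   number is 0xe (page fault). */
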
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context) /* Link register */
# define LR_sig(context)                REG_sig(lr, context)  /* User's integer exception register */
# define CR_sig(context)                REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */