/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

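/* Both cpu_loop_exit() and cpu_resume_from_signal() below unwind with
 * siglongjmp() back to the sigsetjmp() point in cpu_exec(), abandoning the
 * current TB; cpu_exec() then re-dispatches based on env->exception_index.
 */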
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

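/* The return value of cpu_tb_exec() packs two things together: the low
 * TB_EXIT_MASK bits hold the exit condition (TB_EXIT_REQUESTED,
 * TB_EXIT_ICOUNT_EXPIRED, or, for values up to TB_EXIT_IDX1, which of the
 * TB's direct-jump slots was taken), while the remaining bits are a
 * TranslationBlock pointer, recovered with
 * "(TranslationBlock *)(next_tb & ~TB_EXIT_MASK)" and used either to restore
 * the guest PC or as the source TB for direct chaining in cpu_exec().
 */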
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

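/* cpu_exec_nocache() is only reached from the icount-expired path at the
 * bottom of cpu_exec(): when fewer instructions remain in the budget than
 * the cached TB would execute, a throw-away TB capped at max_cycles
 * instructions is generated, run once, then invalidated and freed.
 */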
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

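/* The slow path hashes the physical PC into tcg_ctx.tb_ctx.tb_phys_hash,
 * walks the collision chain (translating a new TB on a miss), moves the hit
 * to the head of the chain, and refreshes the per-virtual-PC tb_jmp_cache
 * entry consulted by tb_find_fast() below.
 */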
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

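/* tb_jmp_cache is a direct-mapped cache indexed by a hash of the virtual PC;
 * if the entry is empty, or the cached TB no longer matches pc/cs_base/flags,
 * tb_find_slow() performs the full physical-hash lookup.
 */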
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

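/* exit_request is the cross-thread (and signal-safe) way to ask the CPU
 * currently inside cpu_exec() to stop: it is latched into cpu->exit_request
 * below and eventually surfaces as an EXCP_INTERRUPT return value.
 */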
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

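    /* Per-target entry fixups: i386 and m68k convert their condition codes
     * into the temporary working format used while translated code runs,
     * and convert back at the bottom of this function; ppc resets its
     * reservation address; most other targets need nothing here.
     */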
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

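            /* By this point any pending exception has been dispatched:
             * indices at or above EXCP_INTERRUPT (e.g. EXCP_DEBUG, EXCP_HLT)
             * terminate cpu_exec() with that value as the return code, while
             * lower values are guest exceptions delivered through
             * cc->do_interrupt() before execution resumes.
             */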
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
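                    /* What follows is per-target hardware interrupt
                     * delivery.  Every case that actually injects an
                     * interrupt also resets next_tb to 0, so the TB found
                     * next is not chained to the one executed before the
                     * interrupt changed the control flow.
                     */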
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
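                /* A pending cpu->exit_request is converted into
                 * EXCP_INTERRUPT here; cpu_loop_exit() longjmps back to the
                 * sigsetjmp() above and the exception dispatch at the top of
                 * the outer loop returns it to the caller of cpu_exec().
                 */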
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
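                /* Direct block chaining: when the previous TB ran to
                 * completion (next_tb != 0) and the new TB sits in a single
                 * guest page, the jump slot selected by
                 * (next_tb & TB_EXIT_MASK) in the old TB is patched to branch
                 * straight to the new one, so later executions skip this
                 * lookup entirely.
                 */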

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
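                /* Rough icount bookkeeping for the TB_EXIT_ICOUNT_EXPIRED
                 * case above: cpu->icount_decr is the 16-bit budget
                 * decremented by the generated code, and cpu->icount_extra
                 * holds the instructions not yet handed to it.  The budget
                 * is refilled in slices of at most 0xffff; once both are
                 * exhausted, any partial remainder is run uncached via
                 * cpu_exec_nocache() and the loop exits with EXCP_INTERRUPT.
                 */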
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}