/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
#include "qtest.h"

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUArchState *env)
{
    return cpu_has_work(env);
}

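/* Unwind to the setjmp() point in cpu_exec(), abandoning the TB that is
   currently executing; env->exception_index tells the outer loop why we
   left. */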
void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* Exit the current TB from a signal handler.  The host registers are
   restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code.  An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

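    /* The low two bits of next_tb tag how the TB exited: 2 means the
       block was interrupted (e.g. the instruction counter expired)
       before completing, so the guest PC must be recovered from the
       TB itself. */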
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurred
           before the TB started executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

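/* Look the TB up in the physically indexed hash table, translating the
   guest code if no block matches.  Indexing by physical address lets a
   translation be shared across different virtual mappings of the same
   page, and lets writes to a physical page invalidate every TB derived
   from it. */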
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

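/* Fast path: probe the direct-mapped tb_jmp_cache, which is indexed by
   the virtual PC; fall back to the full physical lookup on a miss. */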
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* We record a subset of the CPU state; it will always be the
       same before a given translated block is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

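/* If the debug exception was not raised by a watchpoint, clear any
   stale watchpoint-hit flags, then hand off to the registered debug
   exception handler, if any. */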
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

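/* Set asynchronously (e.g. from a signal handler) to request that the
   CPU leave the execution loop; cpu_exec() folds it into
   env->exit_request on entry. */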
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* In user-mode emulation we raise a fake exception
                       that is handled outside the cpu execution
                       loop. */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_state_reset(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* See if we can patch the calling TB.  When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
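                /* The compiler barrier keeps the store to current_tb
                   from being reordered past the exit_request test
                   below, closing the race described above. */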
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
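                        /* The 16-bit icount decrementer can only hold
                           up to 0xffff instructions; any remainder of
                           the budget is parked in icount_extra and
                           moved into the decrementer in chunks. */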
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}