/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
#include "qtest.h"

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUArchState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
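
/* Note: cpu_loop_exit() unwinds to the setjmp() at the top of cpu_exec()
   below.  The non-zero longjmp value only distinguishes the jump from the
   initial setjmp() return; the reason for the exit travels in
   env->exception_index. */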

/* Exit the current TB from a signal handler.  The host registers are
   restored in a state compatible with the CPU emulator. */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
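
/* Setting exception_index to -1 marks "no exception pending": the main
   loop in cpu_exec() only takes its exception path while exception_index
   is >= 0. */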

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
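
/* In this version of QEMU, tcg_qemu_tb_exec() returns the address of the
   TB it exited from, with status encoded in the two low bits: 0 and 1
   name the goto_tb jump slot that was taken (used for block chaining in
   cpu_exec() below), while 2 means the instruction counter expired before
   the block body ran, so the PC has to be recovered from the TB itself. */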

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
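
/* The physical hash chain above is keyed on the guest-physical PC, so a
   single invalidation (e.g. after a write to code memory) covers every
   virtual alias of the page; the per-CPU tb_jmp_cache filled in at the
   end is only a virtual-PC shortcut into that structure. */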

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
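
/* tb_jmp_cache is a direct-mapped, per-CPU cache indexed by a hash of the
   virtual PC; a hit here avoids both the physical-address lookup and the
   hash-chain walk in tb_find_slow(). */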

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
#ifdef TARGET_PPC
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
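    /* QEMU evaluates x86 condition codes lazily: CC_OP records the last
       flag-setting operation and CC_SRC its operands, and the flag bits
       are only recomputed on demand via cpu_cc_compute_all() (see the
       flag restore code further down). */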
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we handle it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value:
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3),
                                next_tb & 3, tb);
                }
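                /* tb_add_jump() patches the native jump in the previous
                   TB so that later executions flow from block to block
                   without returning to this loop; the next_tb = 0
                   assignments above deliberately defeat the patching
                   whenever program flow was changed behind the
                   translator's back. */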
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite chain of TBs and becomes env->current_tb.
                   Avoid starting execution if there is a pending
                   interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
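                        /* The countdown used by the generated code lives
                           in the 16-bit icount_decr.u16.low field, so a
                           budget larger than 0xffff is parked in
                           icount_extra and fed to the decrementer in
                           16-bit slices by the refill below. */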
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}