/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
#include "qtest.h"

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUArchState *env)
{
    return cpu_has_work(env);
}

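/* Unwind back to the setjmp() in cpu_exec().  current_tb is cleared
   first so the rest of QEMU can see that no translated code is
   running; the longjmp() then lands in the "else" branch of the
   setjmp() in the main loop below, which reloads env. */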
void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* Exit the current TB from a signal handler.  The host registers are
   restored in a state compatible with the CPU emulator. */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

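/* Note: this is the icount slow path.  The TB generated below is
   executed exactly once and then invalidated and freed, so it never
   pollutes the translation caches. */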
/* Execute the code without caching the generated code.  An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurred before
           the TB started executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

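/* Slow-path TB lookup.  Hash the physical PC into tb_phys_hash and
   walk the collision chain, matching on pc, physical page(s), cs_base
   and flags; a miss falls through to the translator.  The result is
   moved to the head of its chain and installed in the virtual-PC jump
   cache consulted by tb_find_fast(). */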
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* add the TB to the virtual PC hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

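/* Fast-path TB lookup: a direct-mapped cache indexed by a hash of the
   current virtual PC.  Only on a miss (or when cs_base/flags do not
   match) do we fall back to the physically indexed tables in
   tb_find_slow(). */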
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

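/* Illustrative registration sketch -- my_debug_handler is a
   hypothetical name, not a QEMU symbol.  A target or debug stub
   would install its hook once at setup time:

       static void my_debug_handler(CPUArchState *env)
       {
           ... inspect env->watchpoint_hit, fix up state ...
       }

       cpu_set_debug_excp_handler(my_debug_handler);

   The hook is then invoked from cpu_handle_debug_exception() below
   whenever the main loop is about to return EXCP_DEBUG.  If we
   stopped for a reason other than a watchpoint hit, any stale
   BP_WATCHPOINT_HIT flags are cleared first. */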
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

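/* Set asynchronously (e.g. from a signal handler or another thread)
   to ask the executing CPU to leave its inner loop.  volatile
   sig_atomic_t keeps the flag safe to write from signal context. */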
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
#ifdef TARGET_PPC
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

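    /* Per-target entry fix-ups.  For x86 this switches eflags into the
       translator's lazy condition-code representation: the arithmetic
       flags live in CC_SRC/CC_OP and the direction flag in DF until
       they are folded back into env->eflags on exit. */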
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

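            /* next_tb holds the address of the TB that just executed,
               with a tag in the low two bits: 0 or 1 names the jump
               slot of that TB which may be patched to chain straight
               to the next TB, and 2 signals icount expiry (handled
               below).  A value of 0 disables chaining entirely. */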
            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
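                    /* Per-target interrupt dispatch: each target below
                       checks its own pending lines against its mask
                       state, acknowledges the interrupt, raises it via
                       do_interrupt(), and clears next_tb so the
                       interrupted flow is not chained to directly. */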
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
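                /* tb_lock guards the translation structures (physical
                   hash chains, jump cache, block chaining below)
                   against concurrent user-mode emulation threads;
                   system-mode TCG of this era runs single-threaded, so
                   there the lock is effectively uncontended. */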
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3),
                                next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
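                    /* With -icount, every TB decrements the 16-bit
                       budget in icount_decr.u16.low before its first
                       instruction; on expiry the TB returns early with
                       tag 2 in next_tb.  We then either refill the
                       budget from icount_extra, or run the exact
                       remainder via cpu_exec_nocache() and report an
                       interrupt. */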
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}