[qemu.git] / cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
#include "qtest.h"

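/* Set by tb_gen_code() and the TB invalidation paths when translated
   blocks may have been freed or flushed; the execution loop checks it
   and discards any cached chaining state (next_tb) that could be stale.  */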
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUArchState *env)
{
    return cpu_has_work(env);
}

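/* Abandon the current TB and unwind back to the setjmp() point in
   cpu_exec(), normally after the caller has set env->exception_index.  */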
void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.  */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

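    /* The low two bits of next_tb encode how the TB exited: 0 or 1 name
       the goto_tb jump slot that was taken (used for block chaining),
       while 2 means the TB was interrupted before its first instruction
       ran, so the PC must be restored from the TB itself.  */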
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

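/* Look the (pc, cs_base, flags) triple up in the physical-PC hash table,
   translating a new TB if nothing matches.  A TB that spans two guest
   pages is only reusable if both physical pages still match.  */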
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

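/* Fast path: a direct-mapped cache of recently executed TBs, indexed by
   a hash of the virtual PC.  On a miss or a stale entry, fall back to
   the physical hash table lookup above.  */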
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

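/* Invoked when the execution loop is about to return EXCP_DEBUG: clear
   stale watchpoint-hit flags if no watchpoint actually fired, then give
   the registered handler (typically the gdbstub) a chance to react.  */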
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

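/* Global request to leave cpu_exec(); sig_atomic_t because it may be
   set from a signal handler or from another thread.  */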
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
#ifdef TARGET_PPC
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

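    /* Per-target prologue: some front ends keep condition codes (and
       similar hot state) in a decomposed "CPU temporary format" while
       translated code runs, so convert from the architectural layout
       here and convert back before returning.  */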
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

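    /* Outer loop: one iteration per longjmp back from cpu_loop_exit()
       or cpu_resume_from_signal().  Pending exceptions raised during TB
       execution are dispatched here before the inner loop is entered
       again.  */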
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* in user mode, we simulate a fake exception which
                       will be handled outside the cpu execution loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

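            /* Inner loop: service pending interrupts, look up (or
               translate) the next TB, try to chain it to the previous
               one, and run it.  next_tb carries the previous TB pointer
               with the taken jump-slot index in its low bits; 0 means
               "do not patch anything".  */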
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
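                /* Note: tb_lock protects the TB hash tables and the
                   direct-jump patching below.  It only sees real
                   contention in user-mode emulation, where several
                   guest threads can reach this point; in softmmu builds
                   of this era TCG runs single-threaded.  */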
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
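                        /* icount_decr.u16.low holds at most 0xffff
                           instructions and is what the generated code
                           decrements; icount_extra accounts for the rest
                           of the budget.  A non-negative remainder with
                           icount_extra left means we can refill and keep
                           going; otherwise the budget is exhausted.  */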
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail-safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}