qemu.git / cpu-exec.c (cpu: Move exit_request field to CPUState)
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

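/* Unwind back to the setjmp() in cpu_exec(): guest exceptions raised while a
   TB is executing are delivered by longjmp-ing out of the generated code.  */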
void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

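/* Slow-path TB lookup: hash the physical PC into tb_phys_hash and walk the
   collision chain; if no matching TB exists, translate a new one.  The hit
   (or freshly generated TB) is then cached in the per-CPU tb_jmp_cache so
   that tb_find_fast() can find it next time.  */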
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

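/* Fast-path TB lookup: a direct-mapped cache (tb_jmp_cache) indexed by a
   hash of the current PC.  On a miss, or if cs_base/flags do not match,
   fall back to tb_find_slow().  */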
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

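/* Global request to leave the execution loop, raised asynchronously (for
   instance from a signal handler); cpu_exec() latches it into
   cpu->exit_request below.  */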
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
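    /* next_tb holds the return value of tcg_qemu_tb_exec(): the address of
       the TB that just ran, with the taken jump-slot index encoded in the
       two low bits.  A value of 0 disables direct block chaining for the
       next TB.  */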
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

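    /* Per-target entry fixups: convert architectural flag state into the
       lazy/temporary representation that the TCG-generated code expects
       (e.g. the x86 eflags are split into CC_SRC/CC_OP/DF).  */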
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

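    /* Two nested loops: the outer loop re-enters after every longjmp()
       triggered by cpu_loop_exit() (exceptions, exit requests); the inner
       loop services pending interrupts and then finds, chains and runs
       translation blocks.  */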
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                          0);
                            do_cpu_init(x86_env_get_cpu(env));
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
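                /* Chaining patches the jump slot (next_tb & 3) of the TB
                   that just ran so that it branches straight into the new
                   TB.  A TB spanning two pages may be invalidated through
                   either page, so it is never used as a direct-jump
                   target.  */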
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
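                        /* With -icount, icount_decr.u16.low holds the
                           instruction budget decremented by the generated
                           code, and icount_extra holds the instructions
                           still owed beyond that 16-bit budget.  */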
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}