[qemu.git] / cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

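    /* tcg_qemu_tb_exec() returns the address of the TB we are coming
     * back from, with the exit reason encoded in its low two bits
     * (TB_EXIT_MASK): TB_EXIT_IDX0/TB_EXIT_IDX1 when we left through
     * one of the TB's goto_tb jump slots, TB_EXIT_REQUESTED or
     * TB_EXIT_ICOUNT_EXPIRED when execution was interrupted instead.
     */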
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
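
/* A sketch of the hooks used above (illustrative only, not a verbatim
 * copy of any target): a set_pc hook just stores the TB's start address
 * into the guest PC, e.g.
 *
 *     static void foo_cpu_set_pc(CPUState *cs, vaddr value)
 *     {
 *         FOO_CPU(cs)->env.pc = value;   // hypothetical target
 *     }
 *
 * Targets that must derive more than a raw PC from a TB (x86 keeps eip
 * relative to cs_base, for instance) provide synchronize_from_tb instead.
 */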

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

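    /* The cycle budget ends up in the TB's cflags (the CF_COUNT_MASK
     * bits), so the translator emits at most max_cycles guest
     * instructions into this one-off TB; see tb_gen_code() for the
     * details.
     */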
    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

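    /* tb_gen_code() sets tb_invalidated_flag when it has to flush the
     * whole translation buffer to make room; cpu_exec() tests it before
     * chaining TBs so that it never links to a block that may have been
     * freed (see the next_tb reset further down).
     */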
    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

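    /* Hashing on the physical PC lets a TB be shared by every virtual
     * mapping of the same code, and lets writes invalidate TBs by
     * physical page.
     */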
    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
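    /* tb_jmp_cache is a direct-mapped cache indexed by a hash of the
       virtual PC; on a miss or a stale entry we fall back to the
       physical-hash lookup in tb_find_slow(). */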
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

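/* Set by other threads (and potentially from signal context, hence
   sig_atomic_t) to ask the currently executing CPU to leave its loop;
   see the current_cpu comment in cpu_exec() below. */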
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
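    /* x86 condition codes are evaluated lazily: CC_OP records the last
     * flag-setting operation and CC_SRC/CC_DST its operands, so the
     * eflags bits are only computed when actually needed (see
     * cpu_cc_compute_all() at the bottom of this function).
     */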
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

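    /* Everything below runs under sigsetjmp(): cpu_loop_exit() and
     * cpu_resume_from_signal() siglongjmp() back here, taking the else
     * branch at the bottom of this loop, which reloads the local
     * cpu/env/cc pointers before we go round again.
     */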
    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware "
                                          "INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
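                /* The low bits of next_tb (TB_EXIT_MASK) identify which
                   of the calling TB's two goto_tb slots we came out of;
                   the remaining bits are that TB's address, which is what
                   tb_add_jump() needs in order to patch the slot. */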
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
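                        /* The icount decrementer is only 16 bits wide;
                           any larger budget is parked in icount_extra
                           and fed back in chunks of at most 0xffff. */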
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for (;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for (;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}