/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

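/* With -icount align, the guest's virtual clock is periodically compared
 * against the host's real-time clock.  When the guest runs ahead of real
 * time, align_clocks() below sleeps the vCPU thread to let the host catch
 * up; when the guest falls behind, print_delay() warns the user.  The
 * SyncClocks state tracks the running difference between the two clocks.
 */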
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to run at most 3 ms ahead of real time.
 * The difference between the two clocks can therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
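        /* The guest is at least VM_CLOCK_ADVANCE ns ahead of the host;
         * sleep the difference away.  A diff_clk of e.g. 3500000000 ns
         * splits into sleep_delay = { .tv_sec = 3, .tv_nsec = 500000000 }
         * below (POSIX only; on Windows it is passed to Sleep() in ms).
         */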
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
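    /* diff_clk > 0 means the guest's virtual clock is ahead of the host's
     * real-time clock; diff_clk < 0 means the guest is running late. */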
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2 s if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
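/* The value returned by tcg_qemu_tb_exec() is the address of the last
 * executed TB combined with an exit-reason code in its low bits
 * (TB_EXIT_MASK): codes up to TB_EXIT_IDX1 identify which goto_tb jump
 * slot the TB exited through, while the higher codes (TB_EXIT_REQUESTED,
 * TB_EXIT_ICOUNT_EXPIRED) mean the TB was abandoned before its first
 * instruction, so the guest PC must be wound back to the TB start.
 */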
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

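/* Slow TB lookup.  TBs are hashed on the physical address of their first
 * instruction, so the same translated code is found no matter which
 * virtual mapping it is executed through.  A hit is moved to the front
 * of its hash chain and entered into the per-CPU virtual-PC cache that
 * tb_find_fast() checks first.
 */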
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

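/* Fast TB lookup: a direct-mapped cache indexed by a hash of the current
 * virtual PC.  On a miss, or if cs_base/flags no longer match, fall back
 * to the physical hash table via tb_find_slow().
 */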
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

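/* Set by other threads to ask the vCPU currently executing TBs to leave
 * the loop; mirrored into cpu->exit_request once current_cpu is visible
 * (see the smp_mb() comment in cpu_exec() below).
 */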
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate the difference between the guest clock and the host clock.
     * This delay already includes the delay of the last cycle, so all
     * we need to do is sleep until it is 0. Any advance or delay we gain
     * here we try to fix on the next iteration.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

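            /* next_tb carries the previously executed TB (plus its exit-slot
             * index in the low bits) so that tb_add_jump() further down can
             * patch it to branch directly to the TB found next; zero means
             * there is no caller to chain from. */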
            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
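                        /* icount_decr counts down as the guest executes;
                         * icount_extra holds the part of the instruction
                         * budget that does not fit in the 16-bit
                         * decrementer.  Either refill the decrementer with
                         * up to 0xffff more instructions and keep going,
                         * or run the few remaining instructions uncached
                         * and leave the loop. */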
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}