/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

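/*
 * A sketch of the scheme implemented below, as read from the code (added
 * commentary, not an authoritative spec): diff_clk tracks how far the
 * guest's instruction-driven clock has run ahead of the host's real-time
 * clock.  align_clocks() adds the time corresponding to the instructions
 * executed since the last call (cpu_icount counts the budget still
 * remaining, so last - current is the number executed).  Once the guest
 * is more than VM_CLOCK_ADVANCE (3 ms) ahead, the vCPU thread sleeps off
 * the difference, so diff_clk oscillates around zero; a persistently
 * negative value means the guest is late, which print_delay() reports.
 */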
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

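    /*
     * Added commentary: tcg_qemu_tb_exec() packs its result into a single
     * word.  The low TB_EXIT_MASK bits hold the exit condition and the
     * remaining bits point at the last TB that was executing.  Exit values
     * 0 and 1 name the goto_tb slot taken out of that TB; anything greater
     * means execution stopped at the start of the TB (e.g. the icount
     * decrementer went negative), which is handled below.
     */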
    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

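/*
 * Added note: CF_NOCACHE TBs are one-shot.  cpu_exec_nocache() is used
 * when only part of an existing TB may run, e.g. when icount has fewer
 * instructions left in its budget than the cached TB contains; the TB
 * is generated, executed once, then invalidated and removed right away.
 */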
#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;
    /* volatile because we modify it between setjmp and longjmp */
    volatile bool in_exclusive_region = false;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        start_exclusive();

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        in_exclusive_region = true;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    if (in_exclusive_region) {
        /* We might longjump out of either the codegen or the
         * execution, so must make sure we only end the exclusive
         * region if we started it.
         */
        parallel_cpus = true;
        end_exclusive();
    }
}

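/*
 * Added note: tb_desc is the lookup key for the global physical-address
 * hash table (tb_ctx.htable).  tb_lookup_cmp() below must compare every
 * field that went into generating the TB; a TB is only reusable when pc,
 * physical page(s), cs_base, flags, trace state and the hashed cflags
 * bits all match.
 */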
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

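/*
 * Added note: on hosts with TCG_TARGET_HAS_direct_jump, jmp_target_arg[n]
 * holds the offset of the jump site inside the generated code, and the
 * host instruction there is patched in place; otherwise it holds the
 * destination address itself, which the generated code loads and jumps
 * to indirectly.
 */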
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

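/*
 * Added summary of the chaining protocol below: the destination's
 * jmp_lock is taken first so the TB cannot be invalidated underneath us,
 * then the source's jmp_dest[n] slot is claimed with a compare-and-swap
 * so that only one thread ever patches a given jump.  Losing the race
 * (or finding a CF_INVALID destination) simply leaves the TBs unchained,
 * which is always safe: execution falls back to the main lookup loop.
 */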
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = atomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL, (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

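/*
 * Added note: lookup is two-level.  tb_lookup__cpu_state() first probes
 * the per-vCPU, virtual-PC indexed tb_jmp_cache; on a miss it falls back
 * to tb_htable_lookup() above, and only if both miss do we translate a
 * new TB with tb_gen_code().
 */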
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           false when the interrupt isn't processed,
           true when it is, and we should restart on a new TB,
           and a third one via longjmp through cpu_loop_exit.  */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(atomic_read(&cpu->exit_request))
        || (use_icount
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        atomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

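/*
 * Added note on the icount plumbing used below: icount_decr is a union.
 * u16.low holds the remaining instruction budget (at most 0xffff per
 * refill, the rest parked in cpu->icount_extra), and generated code
 * tests the full 32-bit view at TB entry; storing -1 into u16.high
 * (see cpu_exit()) makes that view negative, forcing the next TB
 * boundary to exit with TB_EXIT_REQUESTED.
 */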
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt. cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(use_icount);
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

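/*
 * Added overview: cpu_exec() is structured as two nested loops inside a
 * sigsetjmp() context.  The outer loop drains pending exceptions, the
 * inner loop services interrupts and then finds and runs one TB (or a
 * chain of linked TBs).  Anything that needs to abandon a TB mid-flight
 * (cpu_loop_exit() and friends) siglongjmps back to the setjmp point,
 * possibly with the iothread lock still held, which is why the handler
 * below drops it again.
 */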
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution.  This is used for icount, precise smc, and stop-
               after-access watchpoints.  Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset.  */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}