4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include <sys/types.h>
33 #include "qemu-common.h"
34 #define NO_CPU_IO_DEFS
37 #include "disas/disas.h"
39 #if defined(CONFIG_USER_ONLY)
41 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
42 #include <sys/param.h>
43 #if __FreeBSD_version >= 700104
44 #define HAVE_KINFO_GETVMMAP
45 #define sigqueue sigqueue_freebsd /* avoid redefinition */
48 #include <machine/profile.h>
57 #include "exec/address-spaces.h"
60 #include "exec/cputlb.h"
61 #include "exec/tb-hash.h"
62 #include "translate-all.h"
63 #include "qemu/bitmap.h"
64 #include "qemu/timer.h"
66 //#define DEBUG_TB_INVALIDATE
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
71 #if !defined(CONFIG_USER_ONLY)
72 /* TB consistency checks only implemented for usermode emulation. */
76 #define SMC_BITMAP_USE_THRESHOLD 10
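/* Note: this threshold is a heuristic.  Every write fault on a page that
   contains translated code increments code_write_count (see
   tb_invalidate_phys_page_fast below); once it reaches the threshold a
   per-page code_bitmap is built, so later writes that do not overlap any
   translated bytes can be filtered out cheaply instead of always taking
   the slow invalidation path. */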
78 typedef struct PageDesc {
79 /* list of TBs intersecting this ram page */
80 TranslationBlock *first_tb;
81 /* in order to optimize self-modifying code, we count the number
82 of writes to a given page and, above a threshold, use a bitmap */
83 unsigned int code_write_count;
84 unsigned long *code_bitmap;
85 #if defined(CONFIG_USER_ONLY)
90 /* In system mode we want L1_MAP to be based on ram offsets,
91 while in user mode we want it to be based on virtual addresses. */
92 #if !defined(CONFIG_USER_ONLY)
93 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
94 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
96 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
99 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
102 /* Size of the L2 (and L3, etc) page tables. */
104 #define V_L2_SIZE (1 << V_L2_BITS)
106 /* The bits remaining after N lower levels of page tables. */
107 #define V_L1_BITS_REM \
108 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
110 #if V_L1_BITS_REM < 4
111 #define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
113 #define V_L1_BITS V_L1_BITS_REM
116 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
118 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
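/* Worked example (illustrative only; the numbers depend on the build
   configuration): with L1_MAP_ADDR_SPACE_BITS = 64, TARGET_PAGE_BITS = 12
   and V_L2_BITS assumed to be 10, V_L1_BITS_REM = (64 - 12) % 10 = 2,
   which is < 4, so V_L1_BITS = 2 + 10 = 12.  That gives a 4096-entry
   level 1 table (V_L1_SIZE = 1 << 12) with V_L1_SHIFT / V_L2_BITS = 4
   further levels of 1024 entries each below it: 12 + 4 * 10 + the 12
   page-offset bits cover the full 64-bit index space. */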
120 uintptr_t qemu_host_page_size;
121 uintptr_t qemu_host_page_mask;
123 /* The bottom level has pointers to PageDesc */
124 static void *l1_map[V_L1_SIZE];
126 /* code generation context */
129 /* translation block context */
130 #ifdef CONFIG_USER_ONLY
131 __thread int have_tb_lock;
136 #ifdef CONFIG_USER_ONLY
137 assert(!have_tb_lock);
138 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
145 #ifdef CONFIG_USER_ONLY
146 assert(have_tb_lock);
148 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
152 void tb_lock_reset(void)
154 #ifdef CONFIG_USER_ONLY
156 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
162 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
163 tb_page_addr_t phys_page2);
164 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
166 void cpu_gen_init(void)
168 tcg_context_init(&tcg_ctx);
171 /* The cpu state corresponding to 'searched_pc' is restored. */
172 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
173 uintptr_t searched_pc)
175 CPUArchState *env = cpu->env_ptr;
176 TCGContext *s = &tcg_ctx;
179 #ifdef CONFIG_PROFILER
183 #ifdef CONFIG_PROFILER
184 ti = profile_getclock();
188 gen_intermediate_code_pc(env, tb);
190 if (tb->cflags & CF_USE_ICOUNT) {
192 /* Reset the cycle counter to the start of the block. */
193 cpu->icount_decr.u16.low += tb->icount;
194 /* Clear the IO flag. */
198 /* find opc index corresponding to search_pc */
199 tc_ptr = (uintptr_t)tb->tc_ptr;
200 if (searched_pc < tc_ptr)
203 s->tb_next_offset = tb->tb_next_offset;
204 #ifdef USE_DIRECT_JUMP
205 s->tb_jmp_offset = tb->tb_jmp_offset;
208 s->tb_jmp_offset = NULL;
209 s->tb_next = tb->tb_next;
211 j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
212 searched_pc - tc_ptr);
215 /* now find start of instruction before */
216 while (s->gen_opc_instr_start[j] == 0) {
219 cpu->icount_decr.u16.low -= s->gen_opc_icount[j];
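/* Net effect of the two icount adjustments (when CF_USE_ICOUNT is set):
   the budget taken for the whole block on entry is given back above, and
   only the instructions that actually executed before 'searched_pc' are
   charged again here, so the instruction counter stays exact across the
   partial execution. */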
221 restore_state_to_opc(env, tb, s->gen_opc_data);
223 #ifdef CONFIG_PROFILER
224 s->restore_time += profile_getclock() - ti;
230 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
232 TranslationBlock *tb;
234 tb = tb_find_pc(retaddr);
236 cpu_restore_state_from_tb(cpu, tb, retaddr);
237 if (tb->cflags & CF_NOCACHE) {
238 /* one-shot translation, invalidate it immediately */
239 cpu->current_tb = NULL;
240 tb_phys_invalidate(tb, -1);
249 static __attribute__((unused)) void map_exec(void *addr, long size)
252 VirtualProtect(addr, size,
253 PAGE_EXECUTE_READWRITE, &old_protect);
256 static __attribute__((unused)) void map_exec(void *addr, long size)
258 unsigned long start, end, page_size;
260 page_size = getpagesize();
261 start = (unsigned long)addr;
262 start &= ~(page_size - 1);
264 end = (unsigned long)addr + size;
265 end += page_size - 1;
266 end &= ~(page_size - 1);
268 mprotect((void *)start, end - start,
269 PROT_READ | PROT_WRITE | PROT_EXEC);
273 void page_size_init(void)
275 /* NOTE: we can always suppose that qemu_host_page_size >=
277 qemu_real_host_page_size = getpagesize();
278 qemu_real_host_page_mask = ~(qemu_real_host_page_size - 1);
279 if (qemu_host_page_size == 0) {
280 qemu_host_page_size = qemu_real_host_page_size;
282 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
283 qemu_host_page_size = TARGET_PAGE_SIZE;
285 qemu_host_page_mask = ~(qemu_host_page_size - 1);
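/* Example of the mask math above: for a 4096-byte page the mask is
   ~(0x1000 - 1) = ~0xfff, so 'addr & qemu_host_page_mask' rounds an
   address down to the start of its host page (page sizes are powers of
   two, so the subtract-and-complement trick is exact). */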
288 static void page_init(void)
291 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
293 #ifdef HAVE_KINFO_GETVMMAP
294 struct kinfo_vmentry *freep;
297 freep = kinfo_getvmmap(getpid(), &cnt);
300 for (i = 0; i < cnt; i++) {
301 unsigned long startaddr, endaddr;
303 startaddr = freep[i].kve_start;
304 endaddr = freep[i].kve_end;
305 if (h2g_valid(startaddr)) {
306 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
308 if (h2g_valid(endaddr)) {
309 endaddr = h2g(endaddr);
310 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
312 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
314 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
325 last_brk = (unsigned long)sbrk(0);
327 f = fopen("/compat/linux/proc/self/maps", "r");
332 unsigned long startaddr, endaddr;
335 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
337 if (n == 2 && h2g_valid(startaddr)) {
338 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
340 if (h2g_valid(endaddr)) {
341 endaddr = h2g(endaddr);
345 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
358 * Called with mmap_lock held for user-mode emulation.
360 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
366 /* Level 1. Always allocated. */
367 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
370 for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
371 void **p = atomic_rcu_read(lp);
377 p = g_new0(void *, V_L2_SIZE);
378 atomic_rcu_set(lp, p);
381 lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
384 pd = atomic_rcu_read(lp);
389 pd = g_new0(PageDesc, V_L2_SIZE);
390 atomic_rcu_set(lp, pd);
393 return pd + (index & (V_L2_SIZE - 1));
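/* Roughly: 'index' is a target page number.  Its top V_L1_BITS select the
   l1_map slot, each intermediate level consumes a further V_L2_BITS, and
   the final level stores the PageDesc entries themselves, which is why the
   last lookup above indexes with (index & (V_L2_SIZE - 1)).  Readers use
   atomic_rcu_read so lookups can run concurrently with allocation. */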
396 static inline PageDesc *page_find(tb_page_addr_t index)
398 return page_find_alloc(index, 0);
401 #if defined(CONFIG_USER_ONLY)
402 /* Currently it is not recommended to allocate big chunks of data in
403 user mode. It will change when a dedicated libc is used. */
404 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
405 region in which the guest needs to run. Revisit this. */
406 #define USE_STATIC_CODE_GEN_BUFFER
409 /* ??? Should configure for this, not list operating systems here. */
410 #if (defined(__linux__) \
411 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
412 || defined(__DragonFly__) || defined(__OpenBSD__) \
413 || defined(__NetBSD__))
417 /* Minimum size of the code gen buffer. This number is somewhat arbitrary,
418 but not so small that we can't have a fair number of TBs live. */
419 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
421 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
422 indicated, this is constrained by the range of direct branches on the
423 host cpu, as used by the TCG implementation of goto_tb. */
424 #if defined(__x86_64__)
425 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
426 #elif defined(__sparc__)
427 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
428 #elif defined(__aarch64__)
429 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
430 #elif defined(__arm__)
431 # define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
432 #elif defined(__s390x__)
433 /* We have a +- 4GB range on the branches; leave some slop. */
434 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
435 #elif defined(__mips__)
436 /* We have a 256MB branch region, but leave room to make sure the
437 main executable is also within that region. */
438 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
440 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
443 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
445 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
446 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
447 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
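/* e.g. on a 32-bit ARM host MAX_CODE_GEN_BUFFER_SIZE above is 16MB, so the
   32MB default is clamped down to 16MB; on x86-64 the 2GB ceiling leaves
   the 32MB default (or an explicitly requested tb_size, see
   size_code_gen_buffer below) untouched. */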
449 static inline size_t size_code_gen_buffer(size_t tb_size)
451 /* Size the buffer. */
453 #ifdef USE_STATIC_CODE_GEN_BUFFER
454 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
456 /* ??? Needs adjustments. */
457 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
458 static buffer, we could size this on RESERVED_VA, on the text
459 segment size of the executable, or continue to use the default. */
460 tb_size = (unsigned long)(ram_size / 4);
463 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
464 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
466 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
467 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
469 tcg_ctx.code_gen_buffer_size = tb_size;
474 /* In order to use J and JAL within the code_gen_buffer, we require
475 that the buffer not cross a 256MB boundary. */
476 static inline bool cross_256mb(void *addr, size_t size)
478 return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
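/* Illustration of the check above: a buffer at 0x0ff00000 of size
   0x00200000 ends at 0x10100000; start ^ end = 0x1fe00000, and masking
   with 0xf0000000 leaves 0x10000000, i.e. the two ends live in different
   256MB-aligned regions.  A buffer that stays inside one region differs
   from its end only in the low 28 bits, so the AND yields 0. */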
481 /* We weren't able to allocate a buffer without crossing that boundary,
482 so make do with the larger portion of the buffer that doesn't cross.
483 Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
484 static inline void *split_cross_256mb(void *buf1, size_t size1)
486 void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
487 size_t size2 = buf1 + size1 - buf2;
495 tcg_ctx.code_gen_buffer_size = size1;
500 #ifdef USE_STATIC_CODE_GEN_BUFFER
501 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
502 __attribute__((aligned(CODE_GEN_ALIGN)));
504 static inline void *alloc_code_gen_buffer(void)
506 void *buf = static_code_gen_buffer;
508 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
509 buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
512 map_exec(buf, tcg_ctx.code_gen_buffer_size);
515 #elif defined(USE_MMAP)
516 static inline void *alloc_code_gen_buffer(void)
518 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
522 /* Constrain the position of the buffer based on the host cpu.
523 Note that these addresses are chosen in concert with the
524 addresses assigned in the relevant linker script file. */
525 # if defined(__PIE__) || defined(__PIC__)
526 /* Don't bother setting a preferred location if we're building
527 a position-independent executable. We're more likely to get
528 an address near the main executable if we let the kernel
529 choose the address. */
530 # elif defined(__x86_64__) && defined(MAP_32BIT)
531 /* Force the memory down into low memory with the executable.
532 Leave the choice of exact location with the kernel. */
534 /* Cannot expect to map more than 800MB in low memory. */
535 if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
536 tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
538 # elif defined(__sparc__)
539 start = 0x40000000ul;
540 # elif defined(__s390x__)
541 start = 0x90000000ul;
542 # elif defined(__mips__)
543 /* ??? We ought to more explicitly manage layout for softmmu too. */
544 # ifdef CONFIG_USER_ONLY
545 start = 0x68000000ul;
546 # elif _MIPS_SIM == _ABI64
547 start = 0x128000000ul;
549 start = 0x08000000ul;
553 buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
554 PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
555 if (buf == MAP_FAILED) {
560 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
561 /* Try again, with the original still mapped, to avoid re-acquiring
562 that 256mb crossing. This time don't specify an address. */
563 size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
564 void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
566 if (buf2 != MAP_FAILED) {
567 if (!cross_256mb(buf2, size1)) {
568 /* Success! Use the new buffer. */
572 /* Failure. Work with what we had. */
576 /* Split the original buffer. Free the smaller half. */
577 buf2 = split_cross_256mb(buf, size1);
578 size2 = tcg_ctx.code_gen_buffer_size;
579 munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
587 static inline void *alloc_code_gen_buffer(void)
589 void *buf = g_try_malloc(tcg_ctx.code_gen_buffer_size);
596 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
597 void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
598 if (buf2 != NULL && !cross_256mb(buf2, tcg_ctx.code_gen_buffer_size)) {
599 /* Success! Use the new buffer. */
603 /* Failure. Work with what we had. Since this is malloc
604 and not mmap, we can't free the other half. */
606 buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
611 map_exec(buf, tcg_ctx.code_gen_buffer_size);
614 #endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
616 static inline void code_gen_alloc(size_t tb_size)
618 tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
619 tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
620 if (tcg_ctx.code_gen_buffer == NULL) {
621 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
625 qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
628 /* Steal room for the prologue at the end of the buffer. This ensures
629 (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
630 from TBs to the prologue are going to be in range. It also means
631 that we don't need to mark (additional) portions of the data segment
633 tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
634 tcg_ctx.code_gen_buffer_size - 1024;
635 tcg_ctx.code_gen_buffer_size -= 1024;
637 tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
638 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
639 tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
640 CODE_GEN_AVG_BLOCK_SIZE;
642 g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
643 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
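/* Note on the sizing above: code_gen_buffer_max_size deliberately leaves
   TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes of slack, which is (roughly) the
   worst-case output of a single TB; tb_alloc() compares against
   code_gen_buffer_max_size before translation starts, so an in-progress
   block cannot run off the end of the buffer.  code_gen_max_blocks is only
   an estimate based on an average block size and caps the tbs[] array. */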
646 /* Must be called before using the QEMU cpus. 'tb_size' is the size
647 (in bytes) allocated to the translation buffer. Zero means default
649 void tcg_exec_init(unsigned long tb_size)
652 code_gen_alloc(tb_size);
653 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
654 tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
656 #if defined(CONFIG_SOFTMMU)
657 /* There's no guest base to take into account, so go ahead and
658 initialize the prologue now. */
659 tcg_prologue_init(&tcg_ctx);
663 bool tcg_enabled(void)
665 return tcg_ctx.code_gen_buffer != NULL;
668 /* Allocate a new translation block. Flush the translation buffer if
669 too many translation blocks or too much generated code. */
670 static TranslationBlock *tb_alloc(target_ulong pc)
672 TranslationBlock *tb;
674 if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
675 (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
676 tcg_ctx.code_gen_buffer_max_size) {
679 tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
685 void tb_free(TranslationBlock *tb)
687 /* In practice this is mostly used for single-use temporary TBs.
688 Ignore the hard cases and just back up if this TB happens to
689 be the last one generated. */
690 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
691 tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
692 tcg_ctx.code_gen_ptr = tb->tc_ptr;
693 tcg_ctx.tb_ctx.nb_tbs--;
697 static inline void invalidate_page_bitmap(PageDesc *p)
699 g_free(p->code_bitmap);
700 p->code_bitmap = NULL;
701 p->code_write_count = 0;
704 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
705 static void page_flush_tb_1(int level, void **lp)
715 for (i = 0; i < V_L2_SIZE; ++i) {
716 pd[i].first_tb = NULL;
717 invalidate_page_bitmap(pd + i);
722 for (i = 0; i < V_L2_SIZE; ++i) {
723 page_flush_tb_1(level - 1, pp + i);
728 static void page_flush_tb(void)
732 for (i = 0; i < V_L1_SIZE; i++) {
733 page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
737 /* flush all the translation blocks */
738 /* XXX: tb_flush is currently not thread safe */
739 void tb_flush(CPUState *cpu)
741 #if defined(DEBUG_FLUSH)
742 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
743 (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
744 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
745 ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
746 tcg_ctx.tb_ctx.nb_tbs : 0);
748 if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
749 > tcg_ctx.code_gen_buffer_size) {
750 cpu_abort(cpu, "Internal error: code buffer overflow\n");
752 tcg_ctx.tb_ctx.nb_tbs = 0;
755 memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
758 memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
761 tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
762 /* XXX: flush processor icache at this point if cache flush is
764 tcg_ctx.tb_ctx.tb_flush_count++;
767 #ifdef DEBUG_TB_CHECK
769 static void tb_invalidate_check(target_ulong address)
771 TranslationBlock *tb;
774 address &= TARGET_PAGE_MASK;
775 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
776 for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
777 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
778 address >= tb->pc + tb->size)) {
779 printf("ERROR invalidate: address=" TARGET_FMT_lx
780 " PC=%08lx size=%04x\n",
781 address, (long)tb->pc, tb->size);
787 /* verify that all the pages have correct rights for code */
788 static void tb_page_check(void)
790 TranslationBlock *tb;
791 int i, flags1, flags2;
793 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
794 for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
795 tb = tb->phys_hash_next) {
796 flags1 = page_get_flags(tb->pc);
797 flags2 = page_get_flags(tb->pc + tb->size - 1);
798 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
799 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
800 (long)tb->pc, tb->size, flags1, flags2);
808 static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
810 TranslationBlock *tb1;
815 *ptb = tb1->phys_hash_next;
818 ptb = &tb1->phys_hash_next;
822 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
824 TranslationBlock *tb1;
829 n1 = (uintptr_t)tb1 & 3;
830 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
832 *ptb = tb1->page_next[n1];
835 ptb = &tb1->page_next[n1];
839 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
841 TranslationBlock *tb1, **ptb;
844 ptb = &tb->jmp_next[n];
847 /* find tb(n) in circular list */
850 n1 = (uintptr_t)tb1 & 3;
851 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
852 if (n1 == n && tb1 == tb) {
856 ptb = &tb1->jmp_first;
858 ptb = &tb1->jmp_next[n1];
861 /* now we can suppress tb(n) from the list */
862 *ptb = tb->jmp_next[n];
864 tb->jmp_next[n] = NULL;
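/* A note on the tagging used throughout the jump and page lists: the low
   two bits of the stored TranslationBlock pointers encode which slot
   (0 or 1) of the pointing TB the link came from, hence the recurring
   "n1 = (uintptr_t)tb1 & 3; tb1 = ... & ~3" pairs.  The value 2 (used for
   jmp_first below) marks the list head, i.e. the TB itself rather than a
   real predecessor. */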
868 /* reset the jump entry 'n' of a TB so that it is not chained to
870 static inline void tb_reset_jump(TranslationBlock *tb, int n)
872 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
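/* tb_next_offset[n] is the code offset just past goto_tb n, so the reset
   above re-points the direct jump at its own fall-through path; execution
   then leaves through the epilogue instead of chaining into a (possibly
   stale) successor TB. */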
875 /* invalidate one TB */
876 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
881 tb_page_addr_t phys_pc;
882 TranslationBlock *tb1, *tb2;
884 /* remove the TB from the hash list */
885 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
886 h = tb_phys_hash_func(phys_pc);
887 tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);
889 /* remove the TB from the page list */
890 if (tb->page_addr[0] != page_addr) {
891 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
892 tb_page_remove(&p->first_tb, tb);
893 invalidate_page_bitmap(p);
895 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
896 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
897 tb_page_remove(&p->first_tb, tb);
898 invalidate_page_bitmap(p);
901 tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
903 /* remove the TB from the hash list */
904 h = tb_jmp_cache_hash_func(tb->pc);
906 if (cpu->tb_jmp_cache[h] == tb) {
907 cpu->tb_jmp_cache[h] = NULL;
911 /* suppress this TB from the two jump lists */
912 tb_jmp_remove(tb, 0);
913 tb_jmp_remove(tb, 1);
915 /* suppress any remaining jumps to this TB */
918 n1 = (uintptr_t)tb1 & 3;
922 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
923 tb2 = tb1->jmp_next[n1];
924 tb_reset_jump(tb1, n1);
925 tb1->jmp_next[n1] = NULL;
928 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
930 tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
933 static void build_page_bitmap(PageDesc *p)
935 int n, tb_start, tb_end;
936 TranslationBlock *tb;
938 p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
942 n = (uintptr_t)tb & 3;
943 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
944 /* NOTE: this is subtle as a TB may span two physical pages */
946 /* NOTE: tb_end may be after the end of the page, but
947 it is not a problem */
948 tb_start = tb->pc & ~TARGET_PAGE_MASK;
949 tb_end = tb_start + tb->size;
950 if (tb_end > TARGET_PAGE_SIZE) {
951 tb_end = TARGET_PAGE_SIZE;
955 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
957 bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
958 tb = tb->page_next[n];
962 /* Called with mmap_lock held for user mode emulation. */
963 TranslationBlock *tb_gen_code(CPUState *cpu,
964 target_ulong pc, target_ulong cs_base,
965 int flags, int cflags)
967 CPUArchState *env = cpu->env_ptr;
968 TranslationBlock *tb;
969 tb_page_addr_t phys_pc, phys_page2;
970 target_ulong virt_page2;
971 tcg_insn_unit *gen_code_buf;
973 #ifdef CONFIG_PROFILER
977 phys_pc = get_page_addr_code(env, pc);
979 cflags |= CF_USE_ICOUNT;
983 /* flush must be done */
985 /* cannot fail at this point */
987 /* Don't forget to invalidate previous TB info. */
988 tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
991 gen_code_buf = tcg_ctx.code_gen_ptr;
992 tb->tc_ptr = gen_code_buf;
993 tb->cs_base = cs_base;
997 #ifdef CONFIG_PROFILER
998 tcg_ctx.tb_count1++; /* includes aborted translations because of
1000 ti = profile_getclock();
1003 tcg_func_start(&tcg_ctx);
1005 gen_intermediate_code(env, tb);
1007 trace_translate_block(tb, tb->pc, tb->tc_ptr);
1009 /* generate machine code */
1010 tb->tb_next_offset[0] = 0xffff;
1011 tb->tb_next_offset[1] = 0xffff;
1012 tcg_ctx.tb_next_offset = tb->tb_next_offset;
1013 #ifdef USE_DIRECT_JUMP
1014 tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
1015 tcg_ctx.tb_next = NULL;
1017 tcg_ctx.tb_jmp_offset = NULL;
1018 tcg_ctx.tb_next = tb->tb_next;
1021 #ifdef CONFIG_PROFILER
1023 tcg_ctx.interm_time += profile_getclock() - ti;
1024 tcg_ctx.code_time -= profile_getclock();
1027 gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf);
1029 #ifdef CONFIG_PROFILER
1030 tcg_ctx.code_time += profile_getclock();
1031 tcg_ctx.code_in_len += tb->size;
1032 tcg_ctx.code_out_len += gen_code_size;
1036 if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
1037 qemu_log("OUT: [size=%d]\n", gen_code_size);
1038 log_disas(tb->tc_ptr, gen_code_size);
1044 tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)gen_code_buf +
1045 gen_code_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
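/* The rounding above is the usual align-up idiom: with CODE_GEN_ALIGN = 16
   (assumed here for illustration), a block ending at offset 0x1298 is
   bumped to 0x12a0, so the next TB starts on a 16-byte boundary. */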
1047 /* check next page if needed */
1048 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1050 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1051 phys_page2 = get_page_addr_code(env, virt_page2);
1053 tb_link_page(tb, phys_pc, phys_page2);
1058 * Invalidate all TBs which intersect with the target physical address range
1059 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1060 * 'is_cpu_write_access' should be true if called from a real cpu write
1061 * access: the virtual CPU will exit the current TB if code is modified inside
1064 * Called with mmap_lock held for user-mode emulation
1066 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1068 while (start < end) {
1069 tb_invalidate_phys_page_range(start, end, 0);
1070 start &= TARGET_PAGE_MASK;
1071 start += TARGET_PAGE_SIZE;
1076 * Invalidate all TBs which intersect with the target physical address range
1077 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1078 * 'is_cpu_write_access' should be true if called from a real cpu write
1079 * access: the virtual CPU will exit the current TB if code is modified inside
1082 * Called with mmap_lock held for user-mode emulation
1084 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1085 int is_cpu_write_access)
1087 TranslationBlock *tb, *tb_next, *saved_tb;
1088 CPUState *cpu = current_cpu;
1089 #if defined(TARGET_HAS_PRECISE_SMC)
1090 CPUArchState *env = NULL;
1092 tb_page_addr_t tb_start, tb_end;
1095 #ifdef TARGET_HAS_PRECISE_SMC
1096 int current_tb_not_found = is_cpu_write_access;
1097 TranslationBlock *current_tb = NULL;
1098 int current_tb_modified = 0;
1099 target_ulong current_pc = 0;
1100 target_ulong current_cs_base = 0;
1101 int current_flags = 0;
1102 #endif /* TARGET_HAS_PRECISE_SMC */
1104 p = page_find(start >> TARGET_PAGE_BITS);
1108 #if defined(TARGET_HAS_PRECISE_SMC)
1114 /* we remove all the TBs in the range [start, end[ */
1115 /* XXX: see if in some cases it could be faster to invalidate all
1118 while (tb != NULL) {
1119 n = (uintptr_t)tb & 3;
1120 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1121 tb_next = tb->page_next[n];
1122 /* NOTE: this is subtle as a TB may span two physical pages */
1124 /* NOTE: tb_end may be after the end of the page, but
1125 it is not a problem */
1126 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1127 tb_end = tb_start + tb->size;
1129 tb_start = tb->page_addr[1];
1130 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1132 if (!(tb_end <= start || tb_start >= end)) {
1133 #ifdef TARGET_HAS_PRECISE_SMC
1134 if (current_tb_not_found) {
1135 current_tb_not_found = 0;
1137 if (cpu->mem_io_pc) {
1138 /* now we have a real cpu fault */
1139 current_tb = tb_find_pc(cpu->mem_io_pc);
1142 if (current_tb == tb &&
1143 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1144 /* If we are modifying the current TB, we must stop
1145 its execution. We could be more precise by checking
1146 that the modification is after the current PC, but it
1147 would require a specialized function to partially
1148 restore the CPU state */
1150 current_tb_modified = 1;
1151 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
1152 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1155 #endif /* TARGET_HAS_PRECISE_SMC */
1156 /* we need to do that to handle the case where a signal
1157 occurs while doing tb_phys_invalidate() */
1160 saved_tb = cpu->current_tb;
1161 cpu->current_tb = NULL;
1163 tb_phys_invalidate(tb, -1);
1165 cpu->current_tb = saved_tb;
1166 if (cpu->interrupt_request && cpu->current_tb) {
1167 cpu_interrupt(cpu, cpu->interrupt_request);
1173 #if !defined(CONFIG_USER_ONLY)
1174 /* if no code remaining, no need to continue to use slow writes */
1176 invalidate_page_bitmap(p);
1177 tlb_unprotect_code(start);
1180 #ifdef TARGET_HAS_PRECISE_SMC
1181 if (current_tb_modified) {
1182 /* we generate a block containing just the instruction
1183 modifying the memory. It will ensure that it cannot modify
1185 cpu->current_tb = NULL;
1186 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1187 cpu_resume_from_signal(cpu, NULL);
1192 /* len must be <= 8 and start must be a multiple of len */
1193 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1199 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1200 cpu_single_env->mem_io_vaddr, len,
1201 cpu_single_env->eip,
1202 cpu_single_env->eip +
1203 (intptr_t)cpu_single_env->segs[R_CS].base);
1206 p = page_find(start >> TARGET_PAGE_BITS);
1210 if (!p->code_bitmap &&
1211 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1212 /* build code bitmap */
1213 build_page_bitmap(p);
1215 if (p->code_bitmap) {
1219 nr = start & ~TARGET_PAGE_MASK;
1220 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
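/* e.g. a 4-byte write at page offset 291: BIT_WORD(291) selects word 4 of
   the bitmap (assuming a 64-bit host), the shift by 291 % 64 = 35 brings
   the bit for byte 291 down to position 0, and the (1 << len) - 1 = 0xf
   test below checks bytes 291..294.  Only if one of them is covered by
   translated code is the full invalidation taken. */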
1221 if (b & ((1 << len) - 1)) {
1226 tb_invalidate_phys_page_range(start, start + len, 1);
1230 #if !defined(CONFIG_SOFTMMU)
1231 /* Called with mmap_lock held. */
1232 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1233 uintptr_t pc, void *puc,
1236 TranslationBlock *tb;
1239 #ifdef TARGET_HAS_PRECISE_SMC
1240 TranslationBlock *current_tb = NULL;
1241 CPUState *cpu = current_cpu;
1242 CPUArchState *env = NULL;
1243 int current_tb_modified = 0;
1244 target_ulong current_pc = 0;
1245 target_ulong current_cs_base = 0;
1246 int current_flags = 0;
1249 addr &= TARGET_PAGE_MASK;
1250 p = page_find(addr >> TARGET_PAGE_BITS);
1255 #ifdef TARGET_HAS_PRECISE_SMC
1256 if (tb && pc != 0) {
1257 current_tb = tb_find_pc(pc);
1263 while (tb != NULL) {
1264 n = (uintptr_t)tb & 3;
1265 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1266 #ifdef TARGET_HAS_PRECISE_SMC
1267 if (current_tb == tb &&
1268 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1269 /* If we are modifying the current TB, we must stop
1270 its execution. We could be more precise by checking
1271 that the modification is after the current PC, but it
1272 would require a specialized function to partially
1273 restore the CPU state */
1275 current_tb_modified = 1;
1276 cpu_restore_state_from_tb(cpu, current_tb, pc);
1277 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1280 #endif /* TARGET_HAS_PRECISE_SMC */
1281 tb_phys_invalidate(tb, addr);
1282 tb = tb->page_next[n];
1285 #ifdef TARGET_HAS_PRECISE_SMC
1286 if (current_tb_modified) {
1287 /* we generate a block containing just the instruction
1288 modifying the memory. It will ensure that it cannot modify
1290 cpu->current_tb = NULL;
1291 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
1295 cpu_resume_from_signal(cpu, puc);
1301 /* add the tb to the target page and protect it if necessary
1303 * Called with mmap_lock held for user-mode emulation.
1305 static inline void tb_alloc_page(TranslationBlock *tb,
1306 unsigned int n, tb_page_addr_t page_addr)
1309 #ifndef CONFIG_USER_ONLY
1310 bool page_already_protected;
1313 tb->page_addr[n] = page_addr;
1314 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1315 tb->page_next[n] = p->first_tb;
1316 #ifndef CONFIG_USER_ONLY
1317 page_already_protected = p->first_tb != NULL;
1319 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1320 invalidate_page_bitmap(p);
1322 #if defined(CONFIG_USER_ONLY)
1323 if (p->flags & PAGE_WRITE) {
1328 /* force the host page as non writable (writes will have a
1329 page fault + mprotect overhead) */
1330 page_addr &= qemu_host_page_mask;
1332 for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1333 addr += TARGET_PAGE_SIZE) {
1335 p2 = page_find(addr >> TARGET_PAGE_BITS);
1340 p2->flags &= ~PAGE_WRITE;
1342 mprotect(g2h(page_addr), qemu_host_page_size,
1343 (prot & PAGE_BITS) & ~PAGE_WRITE);
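/* In user mode this is the core of the SMC handling: the host page backing
   the translated code is made read-only, a later write faults, and the
   signal handler ends up in page_unprotect() below, which invalidates the
   TBs and restores PAGE_WRITE.  System mode instead relies on
   tlb_protect_code() and the softmmu write path. */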
1344 #ifdef DEBUG_TB_INVALIDATE
1345 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1350 /* if some code is already present, then the pages are already
1351 protected. So we handle the case where only the first TB is
1352 allocated in a physical page */
1353 if (!page_already_protected) {
1354 tlb_protect_code(page_addr);
1359 /* add a new TB and link it to the physical page tables. phys_page2 is
1360 * (-1) to indicate that only one page contains the TB.
1362 * Called with mmap_lock held for user-mode emulation.
1364 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1365 tb_page_addr_t phys_page2)
1368 TranslationBlock **ptb;
1370 /* add in the physical hash table */
1371 h = tb_phys_hash_func(phys_pc);
1372 ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
1373 tb->phys_hash_next = *ptb;
1376 /* add in the page list */
1377 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1378 if (phys_page2 != -1) {
1379 tb_alloc_page(tb, 1, phys_page2);
1381 tb->page_addr[1] = -1;
1384 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1385 tb->jmp_next[0] = NULL;
1386 tb->jmp_next[1] = NULL;
1388 /* init original jump addresses */
1389 if (tb->tb_next_offset[0] != 0xffff) {
1390 tb_reset_jump(tb, 0);
1392 if (tb->tb_next_offset[1] != 0xffff) {
1393 tb_reset_jump(tb, 1);
1396 #ifdef DEBUG_TB_CHECK
1401 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1402 tb[1].tc_ptr. Return NULL if not found */
1403 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1405 int m_min, m_max, m;
1407 TranslationBlock *tb;
1409 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1412 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1413 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1416 /* binary search (cf Knuth) */
1418 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1419 while (m_min <= m_max) {
1420 m = (m_min + m_max) >> 1;
1421 tb = &tcg_ctx.tb_ctx.tbs[m];
1422 v = (uintptr_t)tb->tc_ptr;
1425 } else if (tc_ptr < v) {
1431 return &tcg_ctx.tb_ctx.tbs[m_max];
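/* The search works because TBs are carved out of code_gen_buffer in
   allocation order, so tbs[] is sorted by tc_ptr.  On loop exit m_max
   indexes the last block whose start is <= tc_ptr, which (given the range
   check against code_gen_ptr above) is the block containing the host
   address. */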
1434 #if !defined(CONFIG_USER_ONLY)
1435 void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
1437 ram_addr_t ram_addr;
1442 mr = address_space_translate(as, addr, &addr, &l, false);
1443 if (!(memory_region_is_ram(mr)
1444 || memory_region_is_romd(mr))) {
1448 ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
1450 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1453 #endif /* !defined(CONFIG_USER_ONLY) */
1455 void tb_check_watchpoint(CPUState *cpu)
1457 TranslationBlock *tb;
1459 tb = tb_find_pc(cpu->mem_io_pc);
1461 /* We can use retranslation to find the PC. */
1462 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1463 tb_phys_invalidate(tb, -1);
1465 /* The exception probably happened in a helper. The CPU state should
1466 have been saved before calling it. Fetch the PC from there. */
1467 CPUArchState *env = cpu->env_ptr;
1468 target_ulong pc, cs_base;
1469 tb_page_addr_t addr;
1472 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1473 addr = get_page_addr_code(env, pc);
1474 tb_invalidate_phys_range(addr, addr + 1);
1478 #ifndef CONFIG_USER_ONLY
1479 /* in deterministic execution mode, instructions doing device I/Os
1480 must be at the end of the TB */
1481 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
1483 #if defined(TARGET_MIPS) || defined(TARGET_SH4)
1484 CPUArchState *env = cpu->env_ptr;
1486 TranslationBlock *tb;
1488 target_ulong pc, cs_base;
1491 tb = tb_find_pc(retaddr);
1493 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
1496 n = cpu->icount_decr.u16.low + tb->icount;
1497 cpu_restore_state_from_tb(cpu, tb, retaddr);
1498 /* Calculate how many instructions had been executed before the fault
1500 n = n - cpu->icount_decr.u16.low;
1501 /* Generate a new TB ending on the I/O insn. */
1503 /* On MIPS and SH, delay slot instructions can only be restarted if
1504 they were already the first instruction in the TB. If this is not
1505 the first instruction in a TB then re-execute the preceding
1507 #if defined(TARGET_MIPS)
1508 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1509 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1510 cpu->icount_decr.u16.low++;
1511 env->hflags &= ~MIPS_HFLAG_BMASK;
1513 #elif defined(TARGET_SH4)
1514 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1517 cpu->icount_decr.u16.low++;
1518 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1521 /* This should never happen. */
1522 if (n > CF_COUNT_MASK) {
1523 cpu_abort(cpu, "TB too big during recompile");
1526 cflags = n | CF_LAST_IO;
1528 cs_base = tb->cs_base;
1530 tb_phys_invalidate(tb, -1);
1531 if (tb->cflags & CF_NOCACHE) {
1533 /* Invalidate original TB if this TB was generated in
1534 * cpu_exec_nocache() */
1535 tb_phys_invalidate(tb->orig_tb, -1);
1539 /* FIXME: In theory this could raise an exception. In practice
1540 we have already translated the block once so it's probably ok. */
1541 tb_gen_code(cpu, pc, cs_base, flags, cflags);
1542 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1543 the first in the TB) then we end up generating a whole new TB and
1544 repeating the fault, which is horribly inefficient.
1545 Better would be to execute just this insn uncached, or generate a
1547 cpu_resume_from_signal(cpu, NULL);
1550 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1554 /* Discard jump cache entries for any tb which might potentially
1555 overlap the flushed page. */
1556 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1557 memset(&cpu->tb_jmp_cache[i], 0,
1558 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1560 i = tb_jmp_cache_hash_page(addr);
1561 memset(&cpu->tb_jmp_cache[i], 0,
1562 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
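/* Two hash ranges are cleared above because tb_jmp_cache is indexed by
   guest PC: a block whose first instruction sits on the preceding page can
   still extend into the page being flushed, so its cache entries (hashed
   from addr - TARGET_PAGE_SIZE) must go as well. */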
1565 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1567 int i, target_code_size, max_target_code_size;
1568 int direct_jmp_count, direct_jmp2_count, cross_page;
1569 TranslationBlock *tb;
1571 target_code_size = 0;
1572 max_target_code_size = 0;
1574 direct_jmp_count = 0;
1575 direct_jmp2_count = 0;
1576 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1577 tb = &tcg_ctx.tb_ctx.tbs[i];
1578 target_code_size += tb->size;
1579 if (tb->size > max_target_code_size) {
1580 max_target_code_size = tb->size;
1582 if (tb->page_addr[1] != -1) {
1585 if (tb->tb_next_offset[0] != 0xffff) {
1587 if (tb->tb_next_offset[1] != 0xffff) {
1588 direct_jmp2_count++;
1592 /* XXX: avoid using doubles ? */
1593 cpu_fprintf(f, "Translation buffer state:\n");
1594 cpu_fprintf(f, "gen code size %td/%zd\n",
1595 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1596 tcg_ctx.code_gen_buffer_max_size);
1597 cpu_fprintf(f, "TB count %d/%d\n",
1598 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
1599 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
1600 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1601 tcg_ctx.tb_ctx.nb_tbs : 0,
1602 max_target_code_size);
1603 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
1604 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1605 tcg_ctx.code_gen_buffer) /
1606 tcg_ctx.tb_ctx.nb_tbs : 0,
1607 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1608 tcg_ctx.code_gen_buffer) /
1609 target_code_size : 0);
1610 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1611 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1612 tcg_ctx.tb_ctx.nb_tbs : 0);
1613 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1615 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1616 tcg_ctx.tb_ctx.nb_tbs : 0,
1618 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1619 tcg_ctx.tb_ctx.nb_tbs : 0);
1620 cpu_fprintf(f, "\nStatistics:\n");
1621 cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
1622 cpu_fprintf(f, "TB invalidate count %d\n",
1623 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1624 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1625 tcg_dump_info(f, cpu_fprintf);
1628 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1630 tcg_dump_op_count(f, cpu_fprintf);
1633 #else /* CONFIG_USER_ONLY */
1635 void cpu_interrupt(CPUState *cpu, int mask)
1637 cpu->interrupt_request |= mask;
1638 cpu->tcg_exit_req = 1;
1642 * Walks guest process memory "regions" one by one
1643 * and calls callback function 'fn' for each region.
1645 struct walk_memory_regions_data {
1646 walk_memory_regions_fn fn;
1652 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1653 target_ulong end, int new_prot)
1655 if (data->start != -1u) {
1656 int rc = data->fn(data->priv, data->start, end, data->prot);
1662 data->start = (new_prot ? end : -1u);
1663 data->prot = new_prot;
1668 static int walk_memory_regions_1(struct walk_memory_regions_data *data,
1669 target_ulong base, int level, void **lp)
1675 return walk_memory_regions_end(data, base, 0);
1681 for (i = 0; i < V_L2_SIZE; ++i) {
1682 int prot = pd[i].flags;
1684 pa = base | (i << TARGET_PAGE_BITS);
1685 if (prot != data->prot) {
1686 rc = walk_memory_regions_end(data, pa, prot);
1695 for (i = 0; i < V_L2_SIZE; ++i) {
1696 pa = base | ((target_ulong)i <<
1697 (TARGET_PAGE_BITS + V_L2_BITS * level));
1698 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1708 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1710 struct walk_memory_regions_data data;
1718 for (i = 0; i < V_L1_SIZE; i++) {
1719 int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
1720 V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
1726 return walk_memory_regions_end(&data, 0, 0);
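/* walk_memory_regions_end() is what coalesces the per-page flags into
   regions: it only invokes the callback when the protection value changes
   (or at the final call above), so consecutive pages with identical flags
   are reported as a single [start, end) range. */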
1729 static int dump_region(void *priv, target_ulong start,
1730 target_ulong end, unsigned long prot)
1732 FILE *f = (FILE *)priv;
1734 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
1735 " "TARGET_FMT_lx" %c%c%c\n",
1736 start, end, end - start,
1737 ((prot & PAGE_READ) ? 'r' : '-'),
1738 ((prot & PAGE_WRITE) ? 'w' : '-'),
1739 ((prot & PAGE_EXEC) ? 'x' : '-'));
1744 /* dump memory mappings */
1745 void page_dump(FILE *f)
1747 const int length = sizeof(target_ulong) * 2;
1748 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1749 length, "start", length, "end", length, "size", "prot");
1750 walk_memory_regions(f, dump_region);
1753 int page_get_flags(target_ulong address)
1757 p = page_find(address >> TARGET_PAGE_BITS);
1764 /* Modify the flags of a page and invalidate the code if necessary.
1765 The flag PAGE_WRITE_ORG is set automatically depending
1766 on PAGE_WRITE. The mmap_lock should already be held. */
1767 void page_set_flags(target_ulong start, target_ulong end, int flags)
1769 target_ulong addr, len;
1771 /* This function should never be called with addresses outside the
1772 guest address space. If this assert fires, it probably indicates
1773 a missing call to h2g_valid. */
1774 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1775 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1777 assert(start < end);
1779 start = start & TARGET_PAGE_MASK;
1780 end = TARGET_PAGE_ALIGN(end);
1782 if (flags & PAGE_WRITE) {
1783 flags |= PAGE_WRITE_ORG;
1786 for (addr = start, len = end - start;
1788 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1789 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1791 /* If the write protection bit is set, then we invalidate
1793 if (!(p->flags & PAGE_WRITE) &&
1794 (flags & PAGE_WRITE) &&
1796 tb_invalidate_phys_page(addr, 0, NULL, false);
1802 int page_check_range(target_ulong start, target_ulong len, int flags)
1808 /* This function should never be called with addresses outside the
1809 guest address space. If this assert fires, it probably indicates
1810 a missing call to h2g_valid. */
1811 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
1812 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
1818 if (start + len - 1 < start) {
1819 /* We've wrapped around. */
1823 /* must do this before we lose bits in the next step */
1824 end = TARGET_PAGE_ALIGN(start + len);
1825 start = start & TARGET_PAGE_MASK;
1827 for (addr = start, len = end - start;
1829 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
1830 p = page_find(addr >> TARGET_PAGE_BITS);
1834 if (!(p->flags & PAGE_VALID)) {
1838 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
1841 if (flags & PAGE_WRITE) {
1842 if (!(p->flags & PAGE_WRITE_ORG)) {
1845 /* unprotect the page if it was put read-only because it
1846 contains translated code */
1847 if (!(p->flags & PAGE_WRITE)) {
1848 if (!page_unprotect(addr, 0, NULL)) {
1857 /* called from signal handler: invalidate the code and unprotect the
1858 page. Return TRUE if the fault was successfully handled. */
1859 int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
1863 target_ulong host_start, host_end, addr;
1865 /* Technically this isn't safe inside a signal handler. However we
1866 know this only ever happens in a synchronous SEGV handler, so in
1867 practice it seems to be ok. */
1870 p = page_find(address >> TARGET_PAGE_BITS);
1876 /* if the page was really writable, then we change its
1877 protection back to writable */
1878 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
1879 host_start = address & qemu_host_page_mask;
1880 host_end = host_start + qemu_host_page_size;
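/* When the host page is larger than the target page (e.g. 64K host pages
   with 4K target pages), a single mprotect() covered several target pages,
   so every target page inside [host_start, host_end) gets PAGE_WRITE back
   and its translated code invalidated below. */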
1883 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
1884 p = page_find(addr >> TARGET_PAGE_BITS);
1885 p->flags |= PAGE_WRITE;
1888 /* and since the content will be modified, we must invalidate
1889 the corresponding translated code. */
1890 tb_invalidate_phys_page(addr, pc, puc, true);
1891 #ifdef DEBUG_TB_CHECK
1892 tb_invalidate_check(addr);
1895 mprotect((void *)g2h(host_start), qemu_host_page_size,
1904 #endif /* CONFIG_USER_ONLY */