*/
#ifdef _WIN32
#include <windows.h>
-#else
-#include <sys/types.h>
-#include <sys/mman.h>
#endif
-#include <stdarg.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
+#include "qemu/osdep.h"
-#include "config.h"
#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
+#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
-#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
+#include "exec/log.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
TranslationBlock *first_tb;
+#ifdef CONFIG_SOFTMMU
/* in order to optimize self modifying code, we count the number
of lookups we do to a given page to use a bitmap */
unsigned int code_write_count;
unsigned long *code_bitmap;
-#if defined(CONFIG_USER_ONLY)
+#else
unsigned long flags;
#endif
} PageDesc;
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
uintptr_t qemu_host_page_size;
-uintptr_t qemu_host_page_mask;
+intptr_t qemu_host_page_mask;
/* The bottom level has pointers to PageDesc */
static void *l1_map[V_L1_SIZE];
#endif
}
-static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
- tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
void cpu_gen_init(void)
cpu_restore_state_from_tb(cpu, tb, retaddr);
if (tb->cflags & CF_NOCACHE) {
/* one-shot translation, invalidate it immediately */
- cpu->current_tb = NULL;
tb_phys_invalidate(tb, -1);
tb_free(tb);
}
/* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
qemu_real_host_page_size = getpagesize();
- qemu_real_host_page_mask = ~(qemu_real_host_page_size - 1);
+ qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
if (qemu_host_page_size == 0) {
qemu_host_page_size = qemu_real_host_page_size;
}
if (qemu_host_page_size < TARGET_PAGE_SIZE) {
qemu_host_page_size = TARGET_PAGE_SIZE;
}
- qemu_host_page_mask = ~(qemu_host_page_size - 1);
+ qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}
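Storing the masks as signed integers lets -(intptr_t)size produce a mask with all bits set above the page bits for any power-of-two size, and the negative value sign-extends correctly when combined with wider addresses. A minimal self-contained check of that identity (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    intptr_t page_size = 4096;          /* any power of two works */
    intptr_t page_mask = -page_size;    /* same bits as ~(page_size - 1) */

    assert(page_mask == (intptr_t)~(page_size - 1));
    assert((0x12345678 & page_mask) == 0x12345000);  /* rounds down to page */
    return 0;
}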
static void page_init(void)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
+#elif defined(__powerpc64__)
+# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
+#elif defined(__powerpc__)
+# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#elif defined(__arm__)
if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
tb_size = MAX_CODE_GEN_BUFFER_SIZE;
}
- tcg_ctx.code_gen_buffer_size = tb_size;
return tb_size;
}
that the buffer not cross a 256MB boundary. */
static inline bool cross_256mb(void *addr, size_t size)
{
- return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
+ return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
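As a worked example, addr = 0x0fff0000 with size = 0x20000 ends at 0x10010000; addr ^ (addr + size) is 0x1ffe0000, and masking with ~0x0fffffff leaves 0x10000000, so the buffer is correctly reported as crossing the 256MB boundary at 0x10000000. Unlike the old 0xf0000000 constant, the widened mask also keeps the bits above bit 31, so a crossing is still detected when the buffer lives above 4GB in the host address space.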
/* We weren't able to allocate a buffer without crossing that boundary, so
   make do with the larger portion that doesn't cross.  Returns the new base
   of the buffer, and adjusts code_gen_buffer_size. */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
- void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
+ void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
size_t size2 = buf1 + size1 - buf2;
size1 = buf2 - buf1;
case 1:
if (!cross_256mb(buf2, size)) {
/* Success! Use the new buffer. */
- munmap(buf, size);
+ munmap(buf, size + qemu_real_host_page_size);
break;
}
/* Failure. Work with what we had. */
- munmap(buf2, size);
+ munmap(buf2, size + qemu_real_host_page_size);
/* fallthru */
default:
/* Split the original buffer. Free the smaller half. */
qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
+static void tb_htable_init(void)
+{
+ unsigned int mode = QHT_MODE_AUTO_RESIZE;
+
+ qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
+}
+
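For orientation, a lookup in the new hash table computes tb_hash_func() over the physical PC, virtual PC and flags and then lets qht_lookup() scan the bucket with a comparison callback. The sketch below is illustrative only: the helper name, the tb_desc struct and the exact qht_lookup() signature are assumptions, not part of this patch.

/* Illustrative only: locate a TB in the QHT-based hash table. */
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    uint32_t flags;
    tb_page_addr_t phys_pc;
};

static bool tb_desc_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    return tb->pc == desc->pc &&
           tb->cs_base == desc->cs_base &&
           tb->flags == desc->flags &&
           tb->page_addr[0] == desc->phys_pc;
}

static TranslationBlock *tb_htable_lookup_sketch(target_ulong pc,
                                                 target_ulong cs_base,
                                                 uint32_t flags,
                                                 tb_page_addr_t phys_pc)
{
    struct tb_desc desc = { pc, cs_base, flags, phys_pc };
    uint32_t h = tb_hash_func(phys_pc, pc, flags);

    /* assumed signature: qht_lookup(ht, cmp_func, userp, hash) */
    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_desc_cmp, &desc, h);
}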
/* Must be called before using the QEMU cpus. 'tb_size' is the size
(in bytes) allocated to the translation buffer. Zero means default
size. */
{
cpu_gen_init();
page_init();
+ tb_htable_init();
code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
/* There's no guest base to take into account, so go ahead and
static inline void invalidate_page_bitmap(PageDesc *p)
{
+#ifdef CONFIG_SOFTMMU
g_free(p->code_bitmap);
p->code_bitmap = NULL;
p->code_write_count = 0;
+#endif
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
CPU_FOREACH(cpu) {
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+ cpu->tb_flushed = true;
}
- memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
+ qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
page_flush_tb();
tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
#ifdef DEBUG_TB_CHECK
-static void tb_invalidate_check(target_ulong address)
+static void
+do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
- TranslationBlock *tb;
- int i;
+ TranslationBlock *tb = p;
+ target_ulong addr = *(target_ulong *)userp;
- address &= TARGET_PAGE_MASK;
- for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
- for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
- if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
- address >= tb->pc + tb->size)) {
- printf("ERROR invalidate: address=" TARGET_FMT_lx
- " PC=%08lx size=%04x\n",
- address, (long)tb->pc, tb->size);
- }
- }
+ if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
+ printf("ERROR invalidate: address=" TARGET_FMT_lx
+ " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
}
}
-/* verify that all the pages have correct rights for code */
-static void tb_page_check(void)
+static void tb_invalidate_check(target_ulong address)
{
- TranslationBlock *tb;
- int i, flags1, flags2;
-
- for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
- for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
- tb = tb->phys_hash_next) {
- flags1 = page_get_flags(tb->pc);
- flags2 = page_get_flags(tb->pc + tb->size - 1);
- if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
- printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
- (long)tb->pc, tb->size, flags1, flags2);
- }
- }
- }
+ address &= TARGET_PAGE_MASK;
+ qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}
-#endif
-
-static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
+static void
+do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
- TranslationBlock *tb1;
+ TranslationBlock *tb = p;
+ int flags1, flags2;
- for (;;) {
- tb1 = *ptb;
- if (tb1 == tb) {
- *ptb = tb1->phys_hash_next;
- break;
- }
- ptb = &tb1->phys_hash_next;
+ flags1 = page_get_flags(tb->pc);
+ flags2 = page_get_flags(tb->pc + tb->size - 1);
+ if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
+ printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
+ (long)tb->pc, tb->size, flags1, flags2);
}
}
+/* verify that all the pages have correct rights for code */
+static void tb_page_check(void)
+{
+ qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
+}
+
+#endif
+
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
TranslationBlock *tb1;
}
}
-static inline void tb_jmp_remove(TranslationBlock *tb, int n)
+/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
+static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
- TranslationBlock *tb1, **ptb;
+ TranslationBlock *tb1;
+ uintptr_t *ptb, ntb;
unsigned int n1;
- ptb = &tb->jmp_next[n];
- tb1 = *ptb;
- if (tb1) {
+ ptb = &tb->jmp_list_next[n];
+ if (*ptb) {
/* find tb(n) in circular list */
for (;;) {
- tb1 = *ptb;
- n1 = (uintptr_t)tb1 & 3;
- tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
+ ntb = *ptb;
+ n1 = ntb & 3;
+ tb1 = (TranslationBlock *)(ntb & ~3);
if (n1 == n && tb1 == tb) {
break;
}
if (n1 == 2) {
- ptb = &tb1->jmp_first;
+ ptb = &tb1->jmp_list_first;
} else {
- ptb = &tb1->jmp_next[n1];
+ ptb = &tb1->jmp_list_next[n1];
}
}
/* now we can suppress tb(n) from the list */
- *ptb = tb->jmp_next[n];
+ *ptb = tb->jmp_list_next[n];
- tb->jmp_next[n] = NULL;
+ tb->jmp_list_next[n] = (uintptr_t)NULL;
}
}
another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
- tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
+ uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
+ tb_set_jmp_target(tb, n, addr);
+}
+
+/* remove any jumps to the TB */
+static inline void tb_jmp_unlink(TranslationBlock *tb)
+{
+ TranslationBlock *tb1;
+ uintptr_t *ptb, ntb;
+ unsigned int n1;
+
+ ptb = &tb->jmp_list_first;
+ for (;;) {
+ ntb = *ptb;
+ n1 = ntb & 3;
+ tb1 = (TranslationBlock *)(ntb & ~3);
+ if (n1 == 2) {
+ break;
+ }
+ tb_reset_jump(tb1, n1);
+ *ptb = tb1->jmp_list_next[n1];
+ tb1->jmp_list_next[n1] = (uintptr_t)NULL;
+ }
}
/* invalidate one TB */
{
CPUState *cpu;
PageDesc *p;
- unsigned int h, n1;
+ uint32_t h;
tb_page_addr_t phys_pc;
- TranslationBlock *tb1, *tb2;
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_phys_hash_func(phys_pc);
- tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags);
+ qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
/* remove the TB from the page list */
if (tb->page_addr[0] != page_addr) {
invalidate_page_bitmap(p);
}
- tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
-
/* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
}
/* suppress this TB from the two jump lists */
- tb_jmp_remove(tb, 0);
- tb_jmp_remove(tb, 1);
+ tb_remove_from_jmp_list(tb, 0);
+ tb_remove_from_jmp_list(tb, 1);
/* suppress any remaining jumps to this TB */
- tb1 = tb->jmp_first;
- for (;;) {
- n1 = (uintptr_t)tb1 & 3;
- if (n1 == 2) {
- break;
- }
- tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
- tb2 = tb1->jmp_next[n1];
- tb_reset_jump(tb1, n1);
- tb1->jmp_next[n1] = NULL;
- tb1 = tb2;
- }
- tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
+ tb_jmp_unlink(tb);
tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
+#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
int n, tb_start, tb_end;
tb = tb->page_next[n];
}
}
+#endif
+
+/* add the tb in the target page and protect it if necessary
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
+static inline void tb_alloc_page(TranslationBlock *tb,
+ unsigned int n, tb_page_addr_t page_addr)
+{
+ PageDesc *p;
+#ifndef CONFIG_USER_ONLY
+ bool page_already_protected;
+#endif
+
+ tb->page_addr[n] = page_addr;
+ p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
+ tb->page_next[n] = p->first_tb;
+#ifndef CONFIG_USER_ONLY
+ page_already_protected = p->first_tb != NULL;
+#endif
+ p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
+ invalidate_page_bitmap(p);
+
+#if defined(CONFIG_USER_ONLY)
+ if (p->flags & PAGE_WRITE) {
+ target_ulong addr;
+ PageDesc *p2;
+ int prot;
+
+ /* force the host page as non writable (writes will have a
+ page fault + mprotect overhead) */
+ page_addr &= qemu_host_page_mask;
+ prot = 0;
+ for (addr = page_addr; addr < page_addr + qemu_host_page_size;
+ addr += TARGET_PAGE_SIZE) {
+
+ p2 = page_find(addr >> TARGET_PAGE_BITS);
+ if (!p2) {
+ continue;
+ }
+ prot |= p2->flags;
+ p2->flags &= ~PAGE_WRITE;
+ }
+ mprotect(g2h(page_addr), qemu_host_page_size,
+ (prot & PAGE_BITS) & ~PAGE_WRITE);
+#ifdef DEBUG_TB_INVALIDATE
+ printf("protecting code page: 0x" TARGET_FMT_lx "\n",
+ page_addr);
+#endif
+ }
+#else
+ /* if some code is already present, then the pages are already
+ protected. So we handle the case where only the first TB is
+ allocated in a physical page */
+ if (!page_already_protected) {
+ tlb_protect_code(page_addr);
+ }
+#endif
+}
+
+/* add a new TB and link it to the physical page tables. phys_page2 is
+ * (-1) to indicate that only one page contains the TB.
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
+static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
+ tb_page_addr_t phys_page2)
+{
+ uint32_t h;
+
+ /* add in the hash table */
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags);
+ qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
+
+ /* add in the page list */
+ tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
+ if (phys_page2 != -1) {
+ tb_alloc_page(tb, 1, phys_page2);
+ } else {
+ tb->page_addr[1] = -1;
+ }
+
+#ifdef DEBUG_TB_CHECK
+ tb_page_check();
+#endif
+}
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base,
- int flags, int cflags)
+ uint32_t flags, int cflags)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
#endif
phys_pc = get_page_addr_code(env, pc);
- if (use_icount) {
+ if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
cflags |= CF_USE_ICOUNT;
}
/* cannot fail at this point */
tb = tb_alloc(pc);
assert(tb != NULL);
- /* Don't forget to invalidate previous TB info. */
- tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
}
gen_code_buf = tcg_ctx.code_gen_ptr;
tcg_func_start(&tcg_ctx);
+ tcg_ctx.cpu = ENV_GET_CPU(env);
gen_intermediate_code(env, tb);
+ tcg_ctx.cpu = NULL;
trace_translate_block(tb, tb->pc, tb->tc_ptr);
/* generate machine code */
- tb->tb_next_offset[0] = 0xffff;
- tb->tb_next_offset[1] = 0xffff;
- tcg_ctx.tb_next_offset = tb->tb_next_offset;
+ tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
+ tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
+ tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
- tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
- tcg_ctx.tb_next = NULL;
+ tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
+ tcg_ctx.tb_jmp_target_addr = NULL;
#else
- tcg_ctx.tb_jmp_offset = NULL;
- tcg_ctx.tb_next = tb->tb_next;
+ tcg_ctx.tb_jmp_insn_offset = NULL;
+ tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif
#ifdef CONFIG_PROFILER
the tcg optimization currently hidden inside tcg_gen_code. All
that should be required is to flush the TBs, allocate a new TB,
re-initialize it per above, and re-do the actual code generation. */
- gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf);
+ gen_code_size = tcg_gen_code(&tcg_ctx, tb);
if (unlikely(gen_code_size < 0)) {
goto buffer_overflow;
}
#endif
#ifdef DEBUG_DISAS
- if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
+ if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
+ qemu_log_in_addr_range(tb->pc)) {
qemu_log("OUT: [size=%d]\n", gen_code_size);
log_disas(tb->tc_ptr, gen_code_size);
qemu_log("\n");
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
CODE_GEN_ALIGN);
+ /* init jump list */
+ assert(((uintptr_t)tb & 3) == 0);
+ tb->jmp_list_first = (uintptr_t)tb | 2;
+ tb->jmp_list_next[0] = (uintptr_t)NULL;
+ tb->jmp_list_next[1] = (uintptr_t)NULL;
+
+ /* init original jump addresses which have been set during tcg_gen_code() */
+ if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
+ tb_reset_jump(tb, 0);
+ }
+ if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
+ tb_reset_jump(tb, 1);
+ }
+
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_page_addr_code(env, virt_page2);
}
+ /* As long as consistency of the TB stuff is provided by tb_lock in user
+ * mode and is implicit in single-threaded softmmu emulation, no explicit
+ * memory barrier is required before tb_link_page() makes the TB visible
+ * through the physical hash table and physical page list.
+ */
tb_link_page(tb, phys_pc, phys_page2);
return tb;
}
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access)
{
- TranslationBlock *tb, *tb_next, *saved_tb;
- CPUState *cpu = current_cpu;
+ TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
+ CPUState *cpu = current_cpu;
CPUArchState *env = NULL;
#endif
tb_page_addr_t tb_start, tb_end;
int current_tb_modified = 0;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
- int current_flags = 0;
+ uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
p = page_find(start >> TARGET_PAGE_BITS);
&current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
- /* we need to do that to handle the case where a signal
- occurs while doing tb_phys_invalidate() */
- saved_tb = NULL;
- if (cpu != NULL) {
- saved_tb = cpu->current_tb;
- cpu->current_tb = NULL;
- }
tb_phys_invalidate(tb, -1);
- if (cpu != NULL) {
- cpu->current_tb = saved_tb;
- if (cpu->interrupt_request && cpu->current_tb) {
- cpu_interrupt(cpu, cpu->interrupt_request);
- }
- }
}
tb = tb_next;
}
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
- cpu->current_tb = NULL;
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
- cpu_resume_from_signal(cpu, NULL);
+ cpu_loop_exit_noexc(cpu);
}
#endif
}
+#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
tb_invalidate_phys_page_range(start, start + len, 1);
}
}
-
-#if !defined(CONFIG_SOFTMMU)
-/* Called with mmap_lock held. */
-static void tb_invalidate_phys_page(tb_page_addr_t addr,
- uintptr_t pc, void *puc,
- bool locked)
+#else
+/* Called with mmap_lock held. If pc is not 0 then it indicates the
+ * host PC of the faulting store instruction that caused this invalidate.
+ * Returns true if the caller needs to abort execution of the current
+ * TB (because it was modified by this store and the guest CPU has
+ * precise-SMC semantics).
+ */
+static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
TranslationBlock *tb;
PageDesc *p;
int current_tb_modified = 0;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
- int current_flags = 0;
+ uint32_t current_flags = 0;
#endif
addr &= TARGET_PAGE_MASK;
p = page_find(addr >> TARGET_PAGE_BITS);
if (!p) {
- return;
+ return false;
}
tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
- cpu->current_tb = NULL;
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
- if (locked) {
- mmap_unlock();
- }
- cpu_resume_from_signal(cpu, puc);
- }
-#endif
-}
-#endif
-
-/* add the tb in the target page and protect it if necessary
- *
- * Called with mmap_lock held for user-mode emulation.
- */
-static inline void tb_alloc_page(TranslationBlock *tb,
- unsigned int n, tb_page_addr_t page_addr)
-{
- PageDesc *p;
-#ifndef CONFIG_USER_ONLY
- bool page_already_protected;
-#endif
-
- tb->page_addr[n] = page_addr;
- p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
- tb->page_next[n] = p->first_tb;
-#ifndef CONFIG_USER_ONLY
- page_already_protected = p->first_tb != NULL;
-#endif
- p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
- invalidate_page_bitmap(p);
-
-#if defined(CONFIG_USER_ONLY)
- if (p->flags & PAGE_WRITE) {
- target_ulong addr;
- PageDesc *p2;
- int prot;
-
- /* force the host page as non writable (writes will have a
- page fault + mprotect overhead) */
- page_addr &= qemu_host_page_mask;
- prot = 0;
- for (addr = page_addr; addr < page_addr + qemu_host_page_size;
- addr += TARGET_PAGE_SIZE) {
-
- p2 = page_find(addr >> TARGET_PAGE_BITS);
- if (!p2) {
- continue;
- }
- prot |= p2->flags;
- p2->flags &= ~PAGE_WRITE;
- }
- mprotect(g2h(page_addr), qemu_host_page_size,
- (prot & PAGE_BITS) & ~PAGE_WRITE);
-#ifdef DEBUG_TB_INVALIDATE
- printf("protecting code page: 0x" TARGET_FMT_lx "\n",
- page_addr);
-#endif
- }
-#else
- /* if some code is already present, then the pages are already
- protected. So we handle the case where only the first TB is
- allocated in a physical page */
- if (!page_already_protected) {
- tlb_protect_code(page_addr);
+ return true;
}
#endif
+ return false;
}
-
-/* add a new TB and link it to the physical page tables. phys_page2 is
- * (-1) to indicate that only one page contains the TB.
- *
- * Called with mmap_lock held for user-mode emulation.
- */
-static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
- tb_page_addr_t phys_page2)
-{
- unsigned int h;
- TranslationBlock **ptb;
-
- /* add in the physical hash table */
- h = tb_phys_hash_func(phys_pc);
- ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
- tb->phys_hash_next = *ptb;
- *ptb = tb;
-
- /* add in the page list */
- tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
- if (phys_page2 != -1) {
- tb_alloc_page(tb, 1, phys_page2);
- } else {
- tb->page_addr[1] = -1;
- }
-
- tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
- tb->jmp_next[0] = NULL;
- tb->jmp_next[1] = NULL;
-
- /* init original jump addresses */
- if (tb->tb_next_offset[0] != 0xffff) {
- tb_reset_jump(tb, 0);
- }
- if (tb->tb_next_offset[1] != 0xffff) {
- tb_reset_jump(tb, 1);
- }
-
-#ifdef DEBUG_TB_CHECK
- tb_page_check();
#endif
-}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
tb[1].tc_ptr. Return NULL if not found */
rcu_read_unlock();
return;
}
- ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
- + addr;
+ ram_addr = memory_region_get_ram_addr(mr) + addr;
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
rcu_read_unlock();
}
CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base;
tb_page_addr_t addr;
- int flags;
+ uint32_t flags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
addr = get_page_addr_code(env, pc);
TranslationBlock *tb;
uint32_t n, cflags;
target_ulong pc, cs_base;
- uint64_t flags;
+ uint32_t flags;
tb = tb_find_pc(retaddr);
if (!tb) {
repeating the fault, which is horribly inefficient.
Better would be to execute just this insn uncached, or generate a
second new TB. */
- cpu_resume_from_signal(cpu, NULL);
+ cpu_loop_exit_noexc(cpu);
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
int i, target_code_size, max_target_code_size;
int direct_jmp_count, direct_jmp2_count, cross_page;
TranslationBlock *tb;
+ struct qht_stats hst;
+ uint32_t hgram_opts;
+ size_t hgram_bins;
+ char *hgram;
target_code_size = 0;
max_target_code_size = 0;
if (tb->page_addr[1] != -1) {
cross_page++;
}
- if (tb->tb_next_offset[0] != 0xffff) {
+ if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
direct_jmp_count++;
- if (tb->tb_next_offset[1] != 0xffff) {
+ if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
direct_jmp2_count++;
}
}
direct_jmp2_count,
tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
tcg_ctx.tb_ctx.nb_tbs : 0);
+
+ qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
+
+ cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
+ hst.used_head_buckets, hst.head_buckets,
+ (double)hst.used_head_buckets / hst.head_buckets * 100);
+
+ hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
+ hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
+ if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
+ hgram_opts |= QDIST_PR_NODECIMAL;
+ }
+ hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
+ cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
+ qdist_avg(&hst.occupancy) * 100, hgram);
+ g_free(hgram);
+
+ hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
+ hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
+ if (hgram_bins > 10) {
+ hgram_bins = 10;
+ } else {
+ hgram_bins = 0;
+ hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
+ }
+ hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
+ cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
+ qdist_avg(&hst.chain), hgram);
+ g_free(hgram);
+
+ qht_statistics_destroy(&hst);
+
cpu_fprintf(f, "\nStatistics:\n");
cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
cpu_fprintf(f, "TB invalidate count %d\n",
if (!(p->flags & PAGE_WRITE) &&
(flags & PAGE_WRITE) &&
p->first_tb) {
- tb_invalidate_phys_page(addr, 0, NULL, false);
+ tb_invalidate_phys_page(addr, 0);
}
p->flags = flags;
}
/* unprotect the page if it was put read-only because it
contains translated code */
if (!(p->flags & PAGE_WRITE)) {
- if (!page_unprotect(addr, 0, NULL)) {
+ if (!page_unprotect(addr, 0)) {
return -1;
}
}
}
/* called from signal handler: invalidate the code and unprotect the
- page. Return TRUE if the fault was successfully handled. */
-int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
+ * page. Return 0 if the fault was not handled, 1 if it was handled,
+ * and 2 if it was handled but the caller must cause the TB to be
+ * immediately exited. (We can only return 2 if the 'pc' argument is
+ * non-zero.)
+ */
+int page_unprotect(target_ulong address, uintptr_t pc)
{
unsigned int prot;
PageDesc *p;
/* and since the content will be modified, we must invalidate
the corresponding translated code. */
- tb_invalidate_phys_page(addr, pc, puc, true);
+ if (tb_invalidate_phys_page(addr, pc)) {
+ mmap_unlock();
+ return 2;
+ }
#ifdef DEBUG_TB_CHECK
tb_invalidate_check(addr);
#endif
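With the signal context gone from the interface, the user-mode fault path must now act on page_unprotect()'s return value instead of having cpu_resume_from_signal() called on its behalf. A hedged sketch of such a caller; the function name and control flow are invented for illustration:

/* Illustrative only: consume the new page_unprotect() return values
   (0 = not handled, 1 = handled, 2 = handled but the current TB must be
   exited before the access is retried). */
static int handle_code_write_fault(CPUState *cpu, target_ulong address,
                                   uintptr_t host_pc)
{
    switch (page_unprotect(address, host_pc)) {
    case 1:
        return 1;                   /* page unprotected: retry the write */
    case 2:
        cpu_loop_exit_noexc(cpu);   /* TB invalidated: exit it first */
        /* not reached */
    default:
        return 0;                   /* not translated code: deliver signal */
    }
}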