#ifdef _WIN32
#include <windows.h>
-#else
-#include <sys/mman.h>
#endif
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
+#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
TranslationBlock *first_tb;
+#ifdef CONFIG_SOFTMMU
    /* in order to optimize self-modifying code, we count the writes to
       a given page; past a threshold we switch to the code bitmap */
unsigned int code_write_count;
unsigned long *code_bitmap;
-#if defined(CONFIG_USER_ONLY)
+#else
unsigned long flags;
#endif
} PageDesc;
cpu_restore_state_from_tb(cpu, tb, retaddr);
if (tb->cflags & CF_NOCACHE) {
/* one-shot translation, invalidate it immediately */
- cpu->current_tb = NULL;
tb_phys_invalidate(tb, -1);
tb_free(tb);
}
qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}
+static void tb_htable_init(void)
+{
+ unsigned int mode = QHT_MODE_AUTO_RESIZE;
+
+ qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
+}
+
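Annotation: the fixed-size tb_phys_hash is replaced by a single resizable QHT keyed on (phys_pc, pc, flags). For orientation, a lookup against this table takes roughly the shape below; `struct tb_desc`, the callback, and the helper name are illustrative assumptions sketched from the qht_insert()/qht_remove() sites in this patch (the real lookup lives in cpu-exec.c) — only qht_lookup() and tb_hash_func() are from the series itself.

/* Sketch, not part of the patch: how a TB lookup can probe the QHT. */
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    uint32_t flags;
    tb_page_addr_t phys_page1;
};

static bool tb_desc_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    return tb->pc == desc->pc &&
           tb->page_addr[0] == desc->phys_page1 &&
           tb->cs_base == desc->cs_base &&
           tb->flags == desc->flags &&
           !atomic_read(&tb->invalid);   /* skip TBs being torn down */
}

static TranslationBlock *tb_htable_lookup_sketch(struct tb_desc *desc,
                                                 tb_page_addr_t phys_pc)
{
    uint32_t h = tb_hash_func(phys_pc, desc->pc, desc->flags);

    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_desc_cmp, desc, h);
}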
/* Must be called before using the QEMU cpus. 'tb_size' is the size
(in bytes) allocated to the translation buffer. Zero means default
size. */
void tcg_exec_init(unsigned long tb_size)
{
cpu_gen_init();
page_init();
+ tb_htable_init();
code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
/* There's no guest base to take into account, so go ahead and
tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
tb->pc = pc;
tb->cflags = 0;
+ tb->invalid = false;
return tb;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
+#ifdef CONFIG_SOFTMMU
g_free(p->code_bitmap);
p->code_bitmap = NULL;
p->code_write_count = 0;
+#endif
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *cpu)
{
+ if (!tcg_enabled()) {
+ return;
+ }
#if defined(DEBUG_FLUSH)
printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
(unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
> tcg_ctx.code_gen_buffer_size) {
cpu_abort(cpu, "Internal error: code buffer overflow\n");
}
- tcg_ctx.tb_ctx.nb_tbs = 0;
CPU_FOREACH(cpu) {
- memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+ int i;
+
+ for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
+ atomic_set(&cpu->tb_jmp_cache[i], NULL);
+ }
+ atomic_mb_set(&cpu->tb_flushed, true);
}
- memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
+ tcg_ctx.tb_ctx.nb_tbs = 0;
+ qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
page_flush_tb();
tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
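Annotation: replacing the single memset() of tb_jmp_cache with per-entry atomic_set() makes each clear a word-sized atomic store, so a reader racing with the flush sees either NULL or a valid TB pointer, never a torn value. The same idea in portable C11, independent of QEMU's atomic.h (names and size are illustrative):

#include <stdatomic.h>
#include <stddef.h>

#define TB_JMP_CACHE_SIZE 4096   /* illustrative; QEMU derives it from bits */

struct tb;
static _Atomic(struct tb *) tb_jmp_cache[TB_JMP_CACHE_SIZE];

/* Safe against concurrent readers: each slot is one atomic store. */
static void flush_jmp_cache(void)
{
    for (size_t i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        atomic_store_explicit(&tb_jmp_cache[i], NULL, memory_order_relaxed);
    }
}

/* memset(tb_jmp_cache, 0, sizeof(tb_jmp_cache)) would write the same
 * bytes, but gives no per-element atomicity: a concurrent load could
 * observe a half-written pointer, which is a data race. */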
#ifdef DEBUG_TB_CHECK
-static void tb_invalidate_check(target_ulong address)
+static void
+do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
- TranslationBlock *tb;
- int i;
+ TranslationBlock *tb = p;
+ target_ulong addr = *(target_ulong *)userp;
- address &= TARGET_PAGE_MASK;
- for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
- for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
- tb = tb->phys_hash_next) {
- if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
- address >= tb->pc + tb->size)) {
- printf("ERROR invalidate: address=" TARGET_FMT_lx
- " PC=%08lx size=%04x\n",
- address, (long)tb->pc, tb->size);
- }
- }
+ if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
+ printf("ERROR invalidate: address=" TARGET_FMT_lx
+ " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
}
}
-/* verify that all the pages have correct rights for code */
-static void tb_page_check(void)
+static void tb_invalidate_check(target_ulong address)
{
- TranslationBlock *tb;
- int i, flags1, flags2;
-
- for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
- for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
- tb = tb->phys_hash_next) {
- flags1 = page_get_flags(tb->pc);
- flags2 = page_get_flags(tb->pc + tb->size - 1);
- if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
- printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
- (long)tb->pc, tb->size, flags1, flags2);
- }
- }
- }
+ address &= TARGET_PAGE_MASK;
+ qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}
-#endif
-
-static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
+static void
+do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
- TranslationBlock *tb1;
+ TranslationBlock *tb = p;
+ int flags1, flags2;
- for (;;) {
- tb1 = *ptb;
- if (tb1 == tb) {
- *ptb = tb1->phys_hash_next;
- break;
- }
- ptb = &tb1->phys_hash_next;
+ flags1 = page_get_flags(tb->pc);
+ flags2 = page_get_flags(tb->pc + tb->size - 1);
+ if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
+ printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
+ (long)tb->pc, tb->size, flags1, flags2);
}
}
+/* verify that all the pages have correct rights for code */
+static void tb_page_check(void)
+{
+ qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
+}
+
+#endif
+
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
TranslationBlock *tb1;
/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
- uintptr_t tb1, tb2;
+ TranslationBlock *tb1;
+ uintptr_t *ptb, ntb;
unsigned int n1;
- tb1 = tb->jmp_list_first;
+ ptb = &tb->jmp_list_first;
for (;;) {
- TranslationBlock *tmp_tb;
- n1 = tb1 & 3;
+ ntb = *ptb;
+ n1 = ntb & 3;
+ tb1 = (TranslationBlock *)(ntb & ~3);
if (n1 == 2) {
break;
}
- tmp_tb = (TranslationBlock *)(tb1 & ~3);
- tb2 = tmp_tb->jmp_list_next[n1];
- tb_reset_jump(tmp_tb, n1);
- tmp_tb->jmp_list_next[n1] = (uintptr_t)NULL;
- tb1 = tb2;
+ tb_reset_jump(tb1, n1);
+ *ptb = tb1->jmp_list_next[n1];
+ tb1->jmp_list_next[n1] = (uintptr_t)NULL;
}
-
- assert(((uintptr_t)tb & 3) == 0);
- tb->jmp_list_first = (uintptr_t)tb | 2; /* fail safe */
}
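Annotation: the rewritten walk relies on the jump list's tagged-pointer encoding — the low two bits of each entry select which of the referencing TB's two jmp_list_next slots continues the chain, and a tag of 2 terminates the list (the head tags itself with 2; that is the "fail safe" the deleted lines used to reinstate). A self-contained toy model of the encoding, with a plain struct standing in for TranslationBlock:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for TranslationBlock: two outgoing jumps per node. */
struct node {
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};

/* Pack pointer + 2-bit tag; nodes must be at least 4-byte aligned. */
static uintptr_t tag(struct node *n, unsigned bits)
{
    assert(((uintptr_t)n & 3) == 0 && bits <= 2);
    return (uintptr_t)n | bits;
}

int main(void)
{
    struct node target, a, b;

    /* Empty list: the head tagged with 2 terminates the walk at once. */
    target.jmp_list_first = tag(&target, 2);

    /* a jumps to target via its slot 0; b via its slot 1. */
    a.jmp_list_next[0] = target.jmp_list_first;
    target.jmp_list_first = tag(&a, 0);
    b.jmp_list_next[1] = target.jmp_list_first;
    target.jmp_list_first = tag(&b, 1);

    /* Walk as tb_jmp_unlink() does: the tag selects the next slot. */
    for (uintptr_t e = target.jmp_list_first; (e & 3) != 2; ) {
        struct node *n = (struct node *)(e & ~(uintptr_t)3);
        unsigned slot = e & 3;

        printf("node %p jumps via slot %u\n", (void *)n, slot);
        e = n->jmp_list_next[slot];
    }
    return 0;
}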
/* invalidate one TB */
{
CPUState *cpu;
PageDesc *p;
- unsigned int h;
+ uint32_t h;
tb_page_addr_t phys_pc;
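+
+    /* Mark the TB as invalid before unlinking it, so that concurrent
+     * lookups that still see it can notice and reject the stale block. */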
+ atomic_set(&tb->invalid, true);
+
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_phys_hash_func(phys_pc);
- tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags);
+ qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
/* remove the TB from the page list */
if (tb->page_addr[0] != page_addr) {
invalidate_page_bitmap(p);
}
- tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
-
    /* remove the TB from each CPU's jmp cache */
h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
- if (cpu->tb_jmp_cache[h] == tb) {
- cpu->tb_jmp_cache[h] = NULL;
+ if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
+ atomic_set(&cpu->tb_jmp_cache[h], NULL);
}
}
tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
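Annotation: the hash now mixes all three lookup keys — tb_hash_func(phys_pc, pc, flags) — instead of tb_phys_hash_func(phys_pc) alone, so TBs that share a physical address but differ in guest PC or flags no longer pile into one bucket. QEMU's actual mixer is xxhash-based (exec/tb-hash-xx.h); the stand-in below only demonstrates all three keys feeding the hash and is not the real function:

#include <stdint.h>

/* Illustrative 32-bit mixer over the three lookup keys (assumption;
 * see exec/tb-hash-xx.h for the real xxhash-derived routine). */
static inline uint32_t tb_hash_func_sketch(uint64_t phys_pc, uint64_t pc,
                                           uint32_t flags)
{
    uint64_t h = phys_pc * 0x9e3779b97f4a7c15ULL;

    h ^= pc + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
    h ^= flags + (h << 6) + (h >> 2);
    return (uint32_t)(h ^ (h >> 32));
}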
+#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
int n, tb_start, tb_end;
tb = tb->page_next[n];
}
}
+#endif
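Annotation: build_page_bitmap() (body elided above) records which bytes of a guest page are covered by some TB, so the softmmu write path can test a store against the bitmap and skip the expensive range invalidation when it misses all translated code. A stand-alone model of that test — plain arrays in place of QEMU's PageDesc and bitops helpers, sizes illustrative; note how the "start is a multiple of len" precondition keeps the test within one bitmap word:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long code_bitmap[PAGE_SIZE / BITS_PER_LONG];

/* Mark [start, start+len) as covered by translated code. */
static void bitmap_set_range(unsigned start, unsigned len)
{
    for (unsigned i = start; i < start + len; i++) {
        code_bitmap[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
    }
}

/* Does a write of len (<= 8) bytes at page offset start hit code?
 * start must be a multiple of len, so one word always suffices. */
static bool write_hits_code(unsigned start, unsigned len)
{
    unsigned long b = code_bitmap[start / BITS_PER_LONG]
                      >> (start % BITS_PER_LONG);

    return b & ((1UL << len) - 1);
}

int main(void)
{
    memset(code_bitmap, 0, sizeof(code_bitmap));
    bitmap_set_range(0x100, 0x40);   /* one TB covering 0x100..0x13f */

    printf("write at 0x0f8: %s\n", write_hits_code(0x0f8, 8) ? "hit" : "miss");
    printf("write at 0x100: %s\n", write_hits_code(0x100, 8) ? "hit" : "miss");
    printf("write at 0x140: %s\n", write_hits_code(0x140, 8) ? "hit" : "miss");
    return 0;
}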
/* add the tb to the target page and protect it if necessary
*
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
tb_page_addr_t phys_page2)
{
- unsigned int h;
- TranslationBlock **ptb;
-
- /* add in the physical hash table */
- h = tb_phys_hash_func(phys_pc);
- ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
- tb->phys_hash_next = *ptb;
- *ptb = tb;
+ uint32_t h;
/* add in the page list */
tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
tb->page_addr[1] = -1;
}
+ /* add in the hash table */
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags);
+ qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
+
#ifdef DEBUG_TB_CHECK
tb_page_check();
#endif
/* cannot fail at this point */
tb = tb_alloc(pc);
assert(tb != NULL);
- /* Don't forget to invalidate previous TB info. */
- tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
}
gen_code_buf = tcg_ctx.code_gen_ptr;
tcg_func_start(&tcg_ctx);
+ tcg_ctx.cpu = ENV_GET_CPU(env);
gen_intermediate_code(env, tb);
+ tcg_ctx.cpu = NULL;
trace_translate_block(tb, tb->pc, tb->tc_ptr);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access)
{
- TranslationBlock *tb, *tb_next, *saved_tb;
- CPUState *cpu = current_cpu;
+ TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
+ CPUState *cpu = current_cpu;
CPUArchState *env = NULL;
#endif
tb_page_addr_t tb_start, tb_end;
                                 &current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
- /* we need to do that to handle the case where a signal
- occurs while doing tb_phys_invalidate() */
- saved_tb = NULL;
- if (cpu != NULL) {
- saved_tb = cpu->current_tb;
- cpu->current_tb = NULL;
- }
tb_phys_invalidate(tb, -1);
- if (cpu != NULL) {
- cpu->current_tb = saved_tb;
- if (cpu->interrupt_request && cpu->current_tb) {
- cpu_interrupt(cpu, cpu->interrupt_request);
- }
- }
}
tb = tb_next;
}
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
- cpu->current_tb = NULL;
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
- cpu_resume_from_signal(cpu, NULL);
+ cpu_loop_exit_noexc(cpu);
}
#endif
}
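Annotation: cpu_resume_from_signal(cpu, puc) becomes cpu_loop_exit_noexc(cpu) throughout, now that the signal-context argument is unused everywhere. Its job is only to longjmp back to the CPU loop with no pending exception; in the tree this patch targets, the helper (cpu-exec-common.c, not part of this diff) reads approximately:

void cpu_loop_exit_noexc(CPUState *cpu)
{
    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}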
+#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
tb_invalidate_phys_page_range(start, start + len, 1);
}
}
-
-#if !defined(CONFIG_SOFTMMU)
-/* Called with mmap_lock held. */
-static void tb_invalidate_phys_page(tb_page_addr_t addr,
- uintptr_t pc, void *puc,
- bool locked)
+#else
+/* Called with mmap_lock held. If pc is not 0 then it indicates the
+ * host PC of the faulting store instruction that caused this invalidate.
+ * Returns true if the caller needs to abort execution of the current
+ * TB (because it was modified by this store and the guest CPU has
+ * precise-SMC semantics).
+ */
+static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
{
TranslationBlock *tb;
PageDesc *p;
addr &= TARGET_PAGE_MASK;
p = page_find(addr >> TARGET_PAGE_BITS);
if (!p) {
- return;
+ return false;
}
tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
- cpu->current_tb = NULL;
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
- if (locked) {
- mmap_unlock();
- }
- cpu_resume_from_signal(cpu, puc);
+ return true;
}
#endif
+ return false;
}
#endif
rcu_read_unlock();
return;
}
- ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
- + addr;
+ ram_addr = memory_region_get_ram_addr(mr) + addr;
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
rcu_read_unlock();
}
repeating the fault, which is horribly inefficient.
Better would be to execute just this insn uncached, or generate a
second new TB. */
- cpu_resume_from_signal(cpu, NULL);
+ cpu_loop_exit_noexc(cpu);
}
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
+static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
+ struct qht_stats hst)
+{
+ uint32_t hgram_opts;
+ size_t hgram_bins;
+ char *hgram;
+
+ if (!hst.head_buckets) {
+ return;
+ }
+ cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
+ hst.used_head_buckets, hst.head_buckets,
+ (double)hst.used_head_buckets / hst.head_buckets * 100);
+
+ hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
+ hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
+ if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
+ hgram_opts |= QDIST_PR_NODECIMAL;
+ }
+ hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
+ cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
+ qdist_avg(&hst.occupancy) * 100, hgram);
+ g_free(hgram);
+
+ hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
+ hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
+ if (hgram_bins > 10) {
+ hgram_bins = 10;
+ } else {
+ hgram_bins = 0;
+ hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
+ }
+ hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
+ cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
+ qdist_avg(&hst.chain), hgram);
+ g_free(hgram);
+}
+
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
int i, target_code_size, max_target_code_size;
int direct_jmp_count, direct_jmp2_count, cross_page;
TranslationBlock *tb;
+ struct qht_stats hst;
target_code_size = 0;
max_target_code_size = 0;
direct_jmp2_count,
tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
tcg_ctx.tb_ctx.nb_tbs : 0);
+
+ qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
+ print_qht_statistics(f, cpu_fprintf, hst);
+ qht_statistics_destroy(&hst);
+
cpu_fprintf(f, "\nStatistics:\n");
cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
cpu_fprintf(f, "TB invalidate count %d\n",
if (!(p->flags & PAGE_WRITE) &&
(flags & PAGE_WRITE) &&
p->first_tb) {
- tb_invalidate_phys_page(addr, 0, NULL, false);
+ tb_invalidate_phys_page(addr, 0);
}
p->flags = flags;
}
/* unprotect the page if it was put read-only because it
contains translated code */
if (!(p->flags & PAGE_WRITE)) {
- if (!page_unprotect(addr, 0, NULL)) {
+ if (!page_unprotect(addr, 0)) {
return -1;
}
}
}
/* called from signal handler: invalidate the code and unprotect the
- page. Return TRUE if the fault was successfully handled. */
-int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
+ * page. Return 0 if the fault was not handled, 1 if it was handled,
+ * and 2 if it was handled but the caller must cause the TB to be
+ * immediately exited. (We can only return 2 if the 'pc' argument is
+ * non-zero.)
+ */
+int page_unprotect(target_ulong address, uintptr_t pc)
{
unsigned int prot;
+ bool current_tb_invalidated;
PageDesc *p;
target_ulong host_start, host_end, addr;
host_end = host_start + qemu_host_page_size;
prot = 0;
+ current_tb_invalidated = false;
for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
p = page_find(addr >> TARGET_PAGE_BITS);
p->flags |= PAGE_WRITE;
/* and since the content will be modified, we must invalidate
the corresponding translated code. */
- tb_invalidate_phys_page(addr, pc, puc, true);
+ current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
#ifdef DEBUG_TB_CHECK
tb_invalidate_check(addr);
#endif
prot & PAGE_BITS);
mmap_unlock();
- return 1;
+ /* If current TB was invalidated return to main loop */
+ return current_tb_invalidated ? 2 : 1;
}
mmap_unlock();
return 0;
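Annotation: the three-way return lets the signal handler distinguish "not our fault" (0), "fixed, retry the access" (1), and "fixed, but the current TB was invalidated under our feet" (2). A sketch of the caller side, loosely modeled on handle_cpu_signal() in user-exec.c — the helper name and surrounding logic are assumptions; only the 0/1/2 contract comes from this patch:

/* Hypothetical SIGSEGV-handler fragment: returns 1 if the fault was
 * fixed up, 0 if the guest must see the signal. */
static int fixup_code_write_fault(CPUState *cpu, uintptr_t host_addr,
                                  uintptr_t pc)
{
    switch (page_unprotect(h2g(host_addr), pc)) {
    case 2:
        /* Handled, but the write invalidated the TB we are executing:
         * returning into its stale code would be wrong, so restart the
         * execution loop.  (The real handler must restore the signal
         * mask before doing this.) */
        cpu_loop_exit_noexc(cpu);
        /* not reached */
    case 1:
        return 1;   /* handled: re-execute the faulting access */
    default:
        return 0;   /* not a write to a protected code page */
    }
}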