#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
+#include "tcg.h"
+#include "hw/hw.h"
+#include "osdep.h"
+#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
#undef DEBUG_TB_CHECK
#endif
-/* threshold to flush the translated code buffer */
-#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
-
#define SMC_BITMAP_USE_THRESHOLD 10
#define MMAP_AREA_START 0x00000000
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
-TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
+static TranslationBlock *tbs;
+int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
-int nb_tbs;
+static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
-uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
-uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
+#if defined(__arm__) || defined(__sparc_v9__)
+/* The prologue must be reachable with a direct jump. ARM and Sparc64
+   have limited branch ranges (possibly also PPC), so place it in a
+   section close to the code segment. */
+#define code_gen_section \
+ __attribute__((__section__(".gen_code"))) \
+ __attribute__((aligned (32)))
+#else
+#define code_gen_section \
+ __attribute__((aligned (32)))
+#endif
+
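+/* code_gen_prologue holds the host-side entry/exit stub emitted by the
+   code generator: execution enters translated code through it and
+   returns through its epilogue.  That is why it must stay within
+   direct-branch range of the TB buffer (see code_gen_section above). */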
+uint8_t code_gen_prologue[1024] code_gen_section;
+static uint8_t *code_gen_buffer;
+static unsigned long code_gen_buffer_size;
+/* threshold to flush the translated code buffer */
+static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
+#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
+static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
+#endif
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
CPUState *cpu_single_env;
+/* 0 = Do not count executed instructions.
+ 1 = Precise instruction counting.
+ 2 = Adaptive rate instruction counting. */
+int use_icount = 0;
+/* Current instruction counter. While executing translated code this may
+ include some instructions that have not yet been executed. */
+int64_t qemu_icount;
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
} PageDesc;
typedef struct PhysPageDesc {
- /* offset in host memory of the page + io_index in the low 12 bits */
+ /* offset in host memory of the page + io_index in the low bits */
ram_addr_t phys_offset;
} PhysPageDesc;
#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
-static void io_mem_init(void);
-
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
-PhysPageDesc **l1_phys_map;
+static PhysPageDesc **l1_phys_map;
+
+#if !defined(CONFIG_USER_ONLY)
+static void io_mem_init(void);
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
-#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif
/* log support */
-char *logfilename = "/tmp/qemu.log";
+static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;
#else
static void map_exec(void *addr, long size)
{
- unsigned long start, end;
+ unsigned long start, end, page_size;
+ page_size = getpagesize();
start = (unsigned long)addr;
- start &= ~(qemu_real_host_page_size - 1);
+ start &= ~(page_size - 1);
end = (unsigned long)addr + size;
- end += qemu_real_host_page_size - 1;
- end &= ~(qemu_real_host_page_size - 1);
+ end += page_size - 1;
+ end &= ~(page_size - 1);
mprotect((void *)start, end - start,
PROT_READ | PROT_WRITE | PROT_EXEC);
#ifdef _WIN32
{
SYSTEM_INFO system_info;
- DWORD old_protect;
GetSystemInfo(&system_info);
qemu_real_host_page_size = system_info.dwPageSize;
#else
qemu_real_host_page_size = getpagesize();
#endif
- map_exec(code_gen_buffer, sizeof(code_gen_buffer));
- map_exec(code_gen_prologue, sizeof(code_gen_prologue));
-
if (qemu_host_page_size == 0)
qemu_host_page_size = qemu_real_host_page_size;
if (qemu_host_page_size < TARGET_PAGE_SIZE)
FILE *f;
int n;
+ mmap_lock();
+ last_brk = (unsigned long)sbrk(0);
f = fopen("/proc/self/maps", "r");
if (f) {
do {
(1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
endaddr = MIN(endaddr,
(1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
- page_set_flags(TARGET_PAGE_ALIGN(startaddr),
+ page_set_flags(startaddr & TARGET_PAGE_MASK,
TARGET_PAGE_ALIGN(endaddr),
PAGE_RESERVED);
}
} while (!feof(f));
fclose(f);
}
+ mmap_unlock();
}
#endif
}
+static inline PageDesc **page_l1_map(target_ulong index)
+{
+#if TARGET_LONG_BITS > 32
+ /* Host memory outside guest VM. For 32-bit targets we have already
+ excluded high addresses. */
+    if (index >= ((target_ulong)L2_SIZE * L1_SIZE))
+ return NULL;
+#endif
+ return &l1_map[index >> L2_BITS];
+}
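+
+/* Lookup sketch: a page index is split in two; index >> L2_BITS picks
+   the l1_map[] slot and index & (L2_SIZE - 1) picks the PageDesc inside
+   the second-level array, as page_find_alloc() below does. */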
+
static inline PageDesc *page_find_alloc(target_ulong index)
{
PageDesc **lp, *p;
+ lp = page_l1_map(index);
+ if (!lp)
+ return NULL;
- lp = &l1_map[index >> L2_BITS];
p = *lp;
if (!p) {
/* allocate if not found */
- p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
- memset(p, 0, sizeof(PageDesc) * L2_SIZE);
+#if defined(CONFIG_USER_ONLY)
+ unsigned long addr;
+ size_t len = sizeof(PageDesc) * L2_SIZE;
+ /* Don't use qemu_malloc because it may recurse. */
+ p = mmap(0, len, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ *lp = p;
+ addr = h2g(p);
+ if (addr == (target_ulong)addr) {
+ page_set_flags(addr & TARGET_PAGE_MASK,
+ TARGET_PAGE_ALIGN(addr + len),
+ PAGE_RESERVED);
+ }
+#else
+ p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
*lp = p;
+#endif
}
return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(target_ulong index)
{
- PageDesc *p;
+ PageDesc **lp, *p;
+ lp = page_l1_map(index);
+ if (!lp)
+ return NULL;
- p = l1_map[index >> L2_BITS];
+ p = *lp;
if (!p)
return 0;
return p + (index & (L2_SIZE - 1));
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
target_ulong vaddr);
+#define mmap_lock() do { } while(0)
+#define mmap_unlock() do { } while(0)
+#endif
+
+#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
+
+#if defined(CONFIG_USER_ONLY)
+/* Currently it is not recommended to allocate big chunks of data in
+   user mode. This will change once a dedicated libc is used. */
+#define USE_STATIC_CODE_GEN_BUFFER
+#endif
+
+#ifdef USE_STATIC_CODE_GEN_BUFFER
+static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
+#endif
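+
+/* With the static buffer, the tb_size argument of code_gen_alloc() is
+   ignored and the buffer is always DEFAULT_CODE_GEN_BUFFER_SIZE bytes. */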
+
+static void code_gen_alloc(unsigned long tb_size)
+{
+#ifdef USE_STATIC_CODE_GEN_BUFFER
+ code_gen_buffer = static_code_gen_buffer;
+ code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
+ map_exec(code_gen_buffer, code_gen_buffer_size);
+#else
+ code_gen_buffer_size = tb_size;
+ if (code_gen_buffer_size == 0) {
+#if defined(CONFIG_USER_ONLY)
+ /* in user mode, phys_ram_size is not meaningful */
+ code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
+#else
+        /* XXX: needs adjustments */
+ code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
+#endif
+ }
+ if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
+ code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
+ /* The code gen buffer location may have constraints depending on
+ the host cpu and OS */
+#if defined(__linux__)
+ {
+ int flags;
+ void *start = NULL;
+
+ flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#if defined(__x86_64__)
+ flags |= MAP_32BIT;
+ /* Cannot map more than that */
+ if (code_gen_buffer_size > (800 * 1024 * 1024))
+ code_gen_buffer_size = (800 * 1024 * 1024);
+#elif defined(__sparc_v9__)
+        /* Map the buffer below 2G, so we can use direct calls and branches */
+ flags |= MAP_FIXED;
+ start = (void *) 0x60000000UL;
+ if (code_gen_buffer_size > (512 * 1024 * 1024))
+ code_gen_buffer_size = (512 * 1024 * 1024);
+#endif
+ code_gen_buffer = mmap(start, code_gen_buffer_size,
+ PROT_WRITE | PROT_READ | PROT_EXEC,
+ flags, -1, 0);
+ if (code_gen_buffer == MAP_FAILED) {
+ fprintf(stderr, "Could not allocate dynamic translator buffer\n");
+ exit(1);
+ }
+ }
+#elif defined(__FreeBSD__)
+ {
+ int flags;
+ void *addr = NULL;
+ flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#if defined(__x86_64__)
+ /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
+ * 0x40000000 is free */
+ flags |= MAP_FIXED;
+ addr = (void *)0x40000000;
+ /* Cannot map more than that */
+ if (code_gen_buffer_size > (800 * 1024 * 1024))
+ code_gen_buffer_size = (800 * 1024 * 1024);
+#endif
+ code_gen_buffer = mmap(addr, code_gen_buffer_size,
+ PROT_WRITE | PROT_READ | PROT_EXEC,
+ flags, -1, 0);
+ if (code_gen_buffer == MAP_FAILED) {
+ fprintf(stderr, "Could not allocate dynamic translator buffer\n");
+ exit(1);
+ }
+ }
+#else
+ code_gen_buffer = qemu_malloc(code_gen_buffer_size);
+ if (!code_gen_buffer) {
+ fprintf(stderr, "Could not allocate dynamic translator buffer\n");
+ exit(1);
+ }
+ map_exec(code_gen_buffer, code_gen_buffer_size);
+#endif
+#endif /* !USE_STATIC_CODE_GEN_BUFFER */
+ map_exec(code_gen_prologue, sizeof(code_gen_prologue));
+ code_gen_buffer_max_size = code_gen_buffer_size -
+ code_gen_max_block_size();
+ code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
+ tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
+}
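+
+/* Sizing invariants from above: code_gen_buffer_max_size leaves room for
+   one worst-case block (code_gen_max_block_size()) beyond the flush
+   threshold, and tbs[] assumes an average of CODE_GEN_AVG_BLOCK_SIZE
+   bytes of host code per TB. */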
+
+/* Must be called before using the QEMU cpus. 'tb_size' is the size
+ (in bytes) allocated to the translation buffer. Zero means default
+ size. */
+void cpu_exec_init_all(unsigned long tb_size)
+{
+ cpu_gen_init();
+ code_gen_alloc(tb_size);
+ code_gen_ptr = code_gen_buffer;
+ page_init();
+#if !defined(CONFIG_USER_ONLY)
+ io_mem_init();
+#endif
+}
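+
+/* Usage sketch (not part of this patch): a frontend typically calls
+   cpu_exec_init_all(0) once at startup, before the first CPU is
+   created, so the default buffer sizing above is used. */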
+
+#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
+
+#define CPU_COMMON_SAVE_VERSION 1
+
+static void cpu_common_save(QEMUFile *f, void *opaque)
+{
+ CPUState *env = opaque;
+
+ qemu_put_be32s(f, &env->halted);
+ qemu_put_be32s(f, &env->interrupt_request);
+}
+
+static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
+{
+ CPUState *env = opaque;
+
+ if (version_id != CPU_COMMON_SAVE_VERSION)
+ return -EINVAL;
+
+ qemu_get_be32s(f, &env->halted);
+ qemu_get_be32s(f, &env->interrupt_request);
+ tlb_flush(env, 1);
+
+ return 0;
+}
#endif
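+
+/* cpu_common_save/load are registered per CPU from cpu_exec_init()
+   below, alongside the target-specific "cpu" section, so halted and
+   interrupt_request are migrated for every CPU. */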
void cpu_exec_init(CPUState *env)
CPUState **penv;
int cpu_index;
- if (!code_gen_ptr) {
- cpu_gen_init();
- code_gen_ptr = code_gen_buffer;
- page_init();
- io_mem_init();
- }
env->next_cpu = NULL;
penv = &first_cpu;
cpu_index = 0;
cpu_index++;
}
env->cpu_index = cpu_index;
- env->nb_watchpoints = 0;
*penv = env;
+#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
+ register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
+ cpu_common_save, cpu_common_load, env);
+ register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
+ cpu_save, cpu_load, env);
+#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
nb_tbs, nb_tbs > 0 ?
((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
- if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
+ if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
cpu_abort(env1, "Internal error: code buffer overflow\n");
nb_tbs = 0;
}
}
-void tb_jmp_check(TranslationBlock *tb)
+static void tb_jmp_check(TranslationBlock *tb)
{
TranslationBlock *tb1;
unsigned int n1;
tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
-static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
+void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
CPUState *env;
PageDesc *p;
int n, tb_start, tb_end;
TranslationBlock *tb;
- p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
+ p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
if (!p->code_bitmap)
return;
- memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
tb = p->first_tb;
while (tb != NULL) {
}
}
-#ifdef TARGET_HAS_PRECISE_SMC
-
-static void tb_gen_code(CPUState *env,
- target_ulong pc, target_ulong cs_base, int flags,
- int cflags)
+TranslationBlock *tb_gen_code(CPUState *env,
+ target_ulong pc, target_ulong cs_base,
+ int flags, int cflags)
{
TranslationBlock *tb;
uint8_t *tc_ptr;
tb_flush(env);
/* cannot fail at this point */
tb = tb_alloc(pc);
+ /* Don't forget to invalidate previous TB info. */
+ tb_invalidated_flag = 1;
}
tc_ptr = code_gen_ptr;
tb->tc_ptr = tc_ptr;
phys_page2 = get_phys_addr_code(env, virt_page2);
}
tb_link_phys(tb, phys_pc, phys_page2);
+ return tb;
}
-#endif
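+
+/* tb_gen_code() is now exported and returns the TB it built, so a
+   caller that must enter the fresh block right away (the TB lookup in
+   cpu_exec, or cpu_io_recompile() below) can use it directly. */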
/* invalidate all TBs which intersect with the target physical page
starting in range [start;end[. NOTE: start and end must refer to
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
int is_cpu_write_access)
{
- int n, current_tb_modified, current_tb_not_found, current_flags;
+ TranslationBlock *tb, *tb_next, *saved_tb;
CPUState *env = cpu_single_env;
- PageDesc *p;
- TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
target_ulong tb_start, tb_end;
- target_ulong current_pc, current_cs_base;
+ PageDesc *p;
+ int n;
+#ifdef TARGET_HAS_PRECISE_SMC
+ int current_tb_not_found = is_cpu_write_access;
+ TranslationBlock *current_tb = NULL;
+ int current_tb_modified = 0;
+ target_ulong current_pc = 0;
+ target_ulong current_cs_base = 0;
+ int current_flags = 0;
+#endif /* TARGET_HAS_PRECISE_SMC */
p = page_find(start >> TARGET_PAGE_BITS);
if (!p)
/* we remove all the TBs in the range [start, end[ */
/* XXX: see if in some cases it could be faster to invalidate all the code */
- current_tb_not_found = is_cpu_write_access;
- current_tb_modified = 0;
- current_tb = NULL; /* avoid warning */
- current_pc = 0; /* avoid warning */
- current_cs_base = 0; /* avoid warning */
- current_flags = 0; /* avoid warning */
tb = p->first_tb;
while (tb != NULL) {
n = (long)tb & 3;
if (current_tb_not_found) {
current_tb_not_found = 0;
current_tb = NULL;
- if (env->mem_write_pc) {
+ if (env->mem_io_pc) {
/* now we have a real cpu fault */
- current_tb = tb_find_pc(env->mem_write_pc);
+ current_tb = tb_find_pc(env->mem_io_pc);
}
}
if (current_tb == tb &&
- !(current_tb->cflags & CF_SINGLE_INSN)) {
+ (current_tb->cflags & CF_COUNT_MASK) != 1) {
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
current_tb_modified = 1;
cpu_restore_state(current_tb, env,
- env->mem_write_pc, NULL);
-#if defined(TARGET_I386)
- current_flags = env->hflags;
- current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
- current_cs_base = (target_ulong)env->segs[R_CS].base;
- current_pc = current_cs_base + env->eip;
-#else
-#error unsupported CPU
-#endif
+ env->mem_io_pc, NULL);
+            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
+                                 &current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
/* we need to do that to handle the case where a signal
if (!p->first_tb) {
invalidate_page_bitmap(p);
if (is_cpu_write_access) {
- tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
+ tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
}
}
#endif
modifying the memory. It will ensure that it cannot modify
itself */
env->current_tb = NULL;
- tb_gen_code(env, current_pc, current_cs_base, current_flags,
- CF_SINGLE_INSN);
+ tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
cpu_resume_from_signal(env, NULL);
}
#endif
if (1) {
if (loglevel) {
fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
- cpu_single_env->mem_write_vaddr, len,
+ cpu_single_env->mem_io_vaddr, len,
cpu_single_env->eip,
cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
}
static void tb_invalidate_phys_page(target_phys_addr_t addr,
unsigned long pc, void *puc)
{
- int n, current_flags, current_tb_modified;
- target_ulong current_pc, current_cs_base;
+ TranslationBlock *tb;
PageDesc *p;
- TranslationBlock *tb, *current_tb;
+ int n;
#ifdef TARGET_HAS_PRECISE_SMC
+ TranslationBlock *current_tb = NULL;
CPUState *env = cpu_single_env;
+ int current_tb_modified = 0;
+ target_ulong current_pc = 0;
+ target_ulong current_cs_base = 0;
+ int current_flags = 0;
#endif
addr &= TARGET_PAGE_MASK;
if (!p)
return;
tb = p->first_tb;
- current_tb_modified = 0;
- current_tb = NULL;
- current_pc = 0; /* avoid warning */
- current_cs_base = 0; /* avoid warning */
- current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
if (tb && pc != 0) {
current_tb = tb_find_pc(pc);
tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
- !(current_tb->cflags & CF_SINGLE_INSN)) {
+ (current_tb->cflags & CF_COUNT_MASK) != 1) {
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
current_tb_modified = 1;
cpu_restore_state(current_tb, env, pc, puc);
-#if defined(TARGET_I386)
- current_flags = env->hflags;
- current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
- current_cs_base = (target_ulong)env->segs[R_CS].base;
- current_pc = current_cs_base + env->eip;
-#else
-#error unsupported CPU
-#endif
+            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
+                                 &current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate(tb, addr);
modifying the memory. It will ensure that it cannot modify
itself */
env->current_tb = NULL;
- tb_gen_code(env, current_pc, current_cs_base, current_flags,
- CF_SINGLE_INSN);
+ tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
cpu_resume_from_signal(env, puc);
}
#endif
{
TranslationBlock *tb;
- if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
- (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
+ if (nb_tbs >= code_gen_max_blocks ||
+ (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
return NULL;
tb = &tbs[nb_tbs++];
tb->pc = pc;
return tb;
}
+void tb_free(TranslationBlock *tb)
+{
+    /* In practice this is mostly used for single-use temporary TBs.
+       Ignore the hard cases and just back up if this TB happens to
+       be the last one generated. */
+ if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
+ code_gen_ptr = tb->tc_ptr;
+ nb_tbs--;
+ }
+}
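+
+/* Usage sketch (hypothetical caller): generate a one-shot TB with a
+   cflags instruction count of 1, execute it once, then call tb_free();
+   if nothing else was generated in between, the test above succeeds and
+   both the TB slot and its code space are reclaimed. */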
+
/* add a new TB and link it to the physical page tables. phys_page2 is
(-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
unsigned int h;
TranslationBlock **ptb;
+ /* Grab the mmap lock to stop another thread invalidating this TB
+ before we are done. */
+ mmap_lock();
/* add in the physical hash table */
h = tb_phys_hash_func(phys_pc);
ptb = &tb_phys_hash[h];
#ifdef DEBUG_TB_CHECK
tb_page_check();
#endif
+ mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
#endif
/* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
-{
- int i;
+int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+ int flags, CPUWatchpoint **watchpoint)
+{
+ target_ulong len_mask = ~(len - 1);
+ CPUWatchpoint *wp, *prev_wp;
+
+ /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
+ if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
+ fprintf(stderr, "qemu: tried to set invalid watchpoint at "
+ TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
+ return -EINVAL;
+ }
+ wp = qemu_malloc(sizeof(*wp));
+ if (!wp)
+ return -ENOMEM;
+
+ wp->vaddr = addr;
+ wp->len_mask = len_mask;
+ wp->flags = flags;
+
+ /* keep all GDB-injected watchpoints in front */
+ if (!(flags & BP_GDB) && env->watchpoints) {
+ prev_wp = env->watchpoints;
+ while (prev_wp->next != NULL && (prev_wp->next->flags & BP_GDB))
+ prev_wp = prev_wp->next;
+ } else {
+ prev_wp = NULL;
+ }
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (addr == env->watchpoint[i].vaddr)
- return 0;
+ /* Insert new watchpoint */
+ if (prev_wp) {
+ wp->next = prev_wp->next;
+ prev_wp->next = wp;
+ } else {
+ wp->next = env->watchpoints;
+ env->watchpoints = wp;
}
- if (env->nb_watchpoints >= MAX_WATCHPOINTS)
- return -1;
+ if (wp->next)
+ wp->next->prev = wp;
+ wp->prev = prev_wp;
- i = env->nb_watchpoints++;
- env->watchpoint[i].vaddr = addr;
tlb_flush_page(env, addr);
- /* FIXME: This flush is needed because of the hack to make memory ops
- terminate the TB. It can be removed once the proper IO trap and
- re-execute bits are in. */
- tb_flush(env);
- return i;
+
+ if (watchpoint)
+ *watchpoint = wp;
+ return 0;
}
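+
+/* Illustrative call (not part of this patch): a 4-byte GDB-style write
+   watchpoint is set with
+       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
+   a misaligned or non-power-of-2 length fails with -EINVAL. */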
-/* Remove a watchpoint. */
-int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
+/* Remove a specific watchpoint. */
+int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
+ int flags)
{
- int i;
+ target_ulong len_mask = ~(len - 1);
+ CPUWatchpoint *wp;
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (addr == env->watchpoint[i].vaddr) {
- env->nb_watchpoints--;
- env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
- tlb_flush_page(env, addr);
+ for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
+ if (addr == wp->vaddr && len_mask == wp->len_mask
+ && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
+ cpu_watchpoint_remove_by_ref(env, wp);
return 0;
}
}
- return -1;
+ return -ENOENT;
+}
+
+/* Remove a specific watchpoint by reference. */
+void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
+{
+ if (watchpoint->next)
+ watchpoint->next->prev = watchpoint->prev;
+ if (watchpoint->prev)
+ watchpoint->prev->next = watchpoint->next;
+ else
+ env->watchpoints = watchpoint->next;
+
+ tlb_flush_page(env, watchpoint->vaddr);
+
+ qemu_free(watchpoint);
+}
+
+/* Remove all matching watchpoints. */
+void cpu_watchpoint_remove_all(CPUState *env, int mask)
+{
+    CPUWatchpoint *wp, *next;
+
+    /* Grab the next pointer first: remove_by_ref frees the node. */
+    for (wp = env->watchpoints; wp != NULL; wp = next) {
+        next = wp->next;
+        if (wp->flags & mask)
+            cpu_watchpoint_remove_by_ref(env, wp);
+    }
}
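+
+/* The mask argument selects the owner, e.g.
+   cpu_watchpoint_remove_all(env, BP_GDB) drops only GDB-injected
+   entries and leaves target-internal watchpoints in place. */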
-/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
- breakpoint is reached */
-int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
+/* Add a breakpoint. */
+int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
+ CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
- int i;
+ CPUBreakpoint *bp, *prev_bp;
- for(i = 0; i < env->nb_breakpoints; i++) {
- if (env->breakpoints[i] == pc)
- return 0;
+ bp = qemu_malloc(sizeof(*bp));
+ if (!bp)
+ return -ENOMEM;
+
+ bp->pc = pc;
+ bp->flags = flags;
+
+ /* keep all GDB-injected breakpoints in front */
+ if (!(flags & BP_GDB) && env->breakpoints) {
+ prev_bp = env->breakpoints;
+ while (prev_bp->next != NULL && (prev_bp->next->flags & BP_GDB))
+ prev_bp = prev_bp->next;
+ } else {
+ prev_bp = NULL;
}
- if (env->nb_breakpoints >= MAX_BREAKPOINTS)
- return -1;
- env->breakpoints[env->nb_breakpoints++] = pc;
+ /* Insert new breakpoint */
+ if (prev_bp) {
+ bp->next = prev_bp->next;
+ prev_bp->next = bp;
+ } else {
+ bp->next = env->breakpoints;
+ env->breakpoints = bp;
+ }
+ if (bp->next)
+ bp->next->prev = bp;
+ bp->prev = prev_bp;
breakpoint_invalidate(env, pc);
+
+ if (breakpoint)
+ *breakpoint = bp;
return 0;
#else
- return -1;
+ return -ENOSYS;
#endif
}
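+
+/* Illustrative call (not part of this patch): a debugger stub inserts
+   a breakpoint with cpu_breakpoint_insert(env, pc, BP_GDB, NULL); the
+   CPU loop still reports a hit via EXCP_DEBUG. */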
-/* remove a breakpoint */
-int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
+/* Remove a specific breakpoint. */
+int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
- int i;
- for(i = 0; i < env->nb_breakpoints; i++) {
- if (env->breakpoints[i] == pc)
- goto found;
- }
- return -1;
- found:
- env->nb_breakpoints--;
- if (i < env->nb_breakpoints)
- env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
+ CPUBreakpoint *bp;
- breakpoint_invalidate(env, pc);
- return 0;
+ for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (bp->pc == pc && bp->flags == flags) {
+ cpu_breakpoint_remove_by_ref(env, bp);
+ return 0;
+ }
+ }
+ return -ENOENT;
#else
- return -1;
+ return -ENOSYS;
+#endif
+}
+
+/* Remove a specific breakpoint by reference. */
+void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
+{
+#if defined(TARGET_HAS_ICE)
+ if (breakpoint->next)
+ breakpoint->next->prev = breakpoint->prev;
+ if (breakpoint->prev)
+ breakpoint->prev->next = breakpoint->next;
+ else
+ env->breakpoints = breakpoint->next;
+
+ breakpoint_invalidate(env, breakpoint->pc);
+
+ qemu_free(breakpoint);
+#endif
+}
+
+/* Remove all matching breakpoints. */
+void cpu_breakpoint_remove_all(CPUState *env, int mask)
+{
+#if defined(TARGET_HAS_ICE)
+    CPUBreakpoint *bp, *next;
+
+    /* Grab the next pointer first: remove_by_ref frees the node. */
+    for (bp = env->breakpoints; bp != NULL; bp = next) {
+        next = bp->next;
+        if (bp->flags & mask)
+            cpu_breakpoint_remove_by_ref(env, bp);
+    }
#endif
}
#if !defined(CONFIG_SOFTMMU)
/* must avoid mmap() usage of glibc by setting a buffer "by hand" */
{
- static uint8_t logfile_buf[4096];
+ static char logfile_buf[4096];
setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
}
#else
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
+#if !defined(USE_NPTL)
TranslationBlock *tb;
static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
+#endif
+ int old_mask;
+ old_mask = env->interrupt_request;
+    /* FIXME: This is probably not thread-safe. A different thread could
+       be in the middle of a read-modify-write operation. */
env->interrupt_request |= mask;
- /* if the cpu is currently executing code, we must unlink it and
- all the potentially executing TB */
- tb = env->current_tb;
- if (tb && !testandset(&interrupt_lock)) {
- env->current_tb = NULL;
- tb_reset_jump_recursive(tb);
- resetlock(&interrupt_lock);
+#if defined(USE_NPTL)
+    /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
+       problem and hope the cpu will stop of its own accord. For
+       userspace emulation this isn't as bad as it sounds, since
+       signals are mostly used just to interrupt blocking syscalls. */
+#else
+ if (use_icount) {
+ env->icount_decr.u16.high = 0xffff;
+#ifndef CONFIG_USER_ONLY
+ /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
+ an async event happened and we need to process it. */
+ if (!can_do_io(env)
+ && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
+ cpu_abort(env, "Raised interrupt while not in I/O function");
+ }
+#endif
+ } else {
+ tb = env->current_tb;
+ /* if the cpu is currently executing code, we must unlink it and
+ all the potentially executing TB */
+ if (tb && !testandset(&interrupt_lock)) {
+ env->current_tb = NULL;
+ tb_reset_jump_recursive(tb);
+ resetlock(&interrupt_lock);
+ }
}
+#endif
}
void cpu_reset_interrupt(CPUState *env, int mask)
env->interrupt_request &= ~mask;
}
-CPULogItem cpu_log_items[] = {
+const CPULogItem cpu_log_items[] = {
{ CPU_LOG_TB_OUT_ASM, "out_asm",
"show generated host assembly code for each compiled TB" },
{ CPU_LOG_TB_IN_ASM, "in_asm",
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
- CPULogItem *item;
+ const CPULogItem *item;
int mask;
const char *p, *p1;
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
#ifdef TARGET_I386
- if(env->intercept & INTERCEPT_SVM_MASK) {
- /* most probably the virtual machine should not
- be shut down but rather caught by the VMM */
- vmexit(SVM_EXIT_SHUTDOWN, 0);
- }
cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
cpu_dump_state(env, stderr, fprintf, 0);
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
-#if !defined(CONFIG_SOFTMMU)
- munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
-#endif
#ifdef USE_KQEMU
if (env->kqemu_enabled) {
kqemu_flush(env, flush_global);
tlb_flush_jmp_cache(env, addr);
-#if !defined(CONFIG_SOFTMMU)
- if (addr < MMAP_AREA_END)
- munmap((void *)addr, TARGET_PAGE_SIZE);
-#endif
#ifdef USE_KQEMU
if (env->kqemu_enabled) {
kqemu_flush_page(env, addr);
if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
if ((addr - start) < length) {
- tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
+ tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
}
}
}
#endif
#endif
}
+}
-#if !defined(CONFIG_SOFTMMU)
- /* XXX: this is expensive */
- {
- VirtPageDesc *p;
- int j;
- target_ulong addr;
+int cpu_physical_memory_set_dirty_tracking(int enable)
+{
+ in_migration = enable;
+ return 0;
+}
- for(i = 0; i < L1_SIZE; i++) {
- p = l1_virt_map[i];
- if (p) {
- addr = i << (TARGET_PAGE_BITS + L2_BITS);
- for(j = 0; j < L2_SIZE; j++) {
- if (p->valid_tag == virt_valid_tag &&
- p->phys_addr >= start && p->phys_addr < end &&
- (p->prot & PROT_WRITE)) {
- if (addr < MMAP_AREA_END) {
- mprotect((void *)addr, TARGET_PAGE_SIZE,
- p->prot & ~PROT_WRITE);
- }
- }
- addr += TARGET_PAGE_SIZE;
- p++;
- }
- }
- }
- }
-#endif
+int cpu_physical_memory_get_dirty_tracking(void)
+{
+ return in_migration;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
tlb_entry->addend - (unsigned long)phys_ram_base;
if (!cpu_physical_memory_is_dirty(ram_addr)) {
- tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
+ tlb_entry->addr_write |= TLB_NOTDIRTY;
}
}
}
#endif
}
-static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
- unsigned long start)
+static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
- unsigned long addr;
- if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
- addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
- if (addr == start) {
- tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
- }
- }
+ if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
+ tlb_entry->addr_write = vaddr;
}
-/* update the TLB corresponding to virtual page vaddr and phys addr
- addr so that it is no longer dirty */
-static inline void tlb_set_dirty(CPUState *env,
- unsigned long addr, target_ulong vaddr)
+/* update the TLB corresponding to virtual page vaddr
+ so that it is no longer dirty */
+static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
int i;
- addr &= TARGET_PAGE_MASK;
+ vaddr &= TARGET_PAGE_MASK;
i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_set_dirty1(&env->tlb_table[0][i], addr);
- tlb_set_dirty1(&env->tlb_table[1][i], addr);
+ tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
+ tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
- tlb_set_dirty1(&env->tlb_table[2][i], addr);
+ tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
- tlb_set_dirty1(&env->tlb_table[3][i], addr);
+ tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}
unsigned long pd;
unsigned int index;
target_ulong address;
+ target_ulong code_address;
target_phys_addr_t addend;
int ret;
CPUTLBEntry *te;
- int i;
+ CPUWatchpoint *wp;
+ target_phys_addr_t iotlb;
p = phys_page_find(paddr >> TARGET_PAGE_BITS);
if (!p) {
#endif
ret = 0;
-#if !defined(CONFIG_SOFTMMU)
- if (is_softmmu)
-#endif
- {
- if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
- /* IO memory case */
- address = vaddr | pd;
- addend = paddr;
- } else {
- /* standard memory */
- address = vaddr;
- addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
- }
-
- /* Make accesses to pages with watchpoints go via the
- watchpoint trap routines. */
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
- if (address & ~TARGET_PAGE_MASK) {
- env->watchpoint[i].addend = 0;
- address = vaddr | io_mem_watch;
- } else {
- env->watchpoint[i].addend = pd - paddr +
- (unsigned long) phys_ram_base;
- /* TODO: Figure out how to make read watchpoints coexist
- with code. */
- pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
- }
- }
- }
-
- index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- addend -= vaddr;
- te = &env->tlb_table[mmu_idx][index];
- te->addend = addend;
- if (prot & PAGE_READ) {
- te->addr_read = address;
- } else {
- te->addr_read = -1;
+ address = vaddr;
+ if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
+ /* IO memory case (romd handled later) */
+ address |= TLB_MMIO;
+ }
+ addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
+ if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
+ /* Normal RAM. */
+ iotlb = pd & TARGET_PAGE_MASK;
+ if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
+ iotlb |= IO_MEM_NOTDIRTY;
+ else
+ iotlb |= IO_MEM_ROM;
+ } else {
+        /* IO handlers are currently passed a physical address.
+ It would be nice to pass an offset from the base address
+ of that region. This would avoid having to special case RAM,
+ and avoid full address decoding in every device.
+ We can't use the high bits of pd for this because
+ IO_MEM_ROMD uses these as a ram address. */
+ iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
+ }
+
+ code_address = address;
+ /* Make accesses to pages with watchpoints go via the
+ watchpoint trap routines. */
+ for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
+ if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
+ iotlb = io_mem_watch + paddr;
+ /* TODO: The memory case can be optimized by not trapping
+ reads of pages with a write breakpoint. */
+ address |= TLB_MMIO;
}
+ }
- if (te->addr_code != -1) {
- tlb_flush_jmp_cache(env, te->addr_code);
- }
- if (prot & PAGE_EXEC) {
- te->addr_code = address;
- } else {
- te->addr_code = -1;
- }
- if (prot & PAGE_WRITE) {
- if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
- (pd & IO_MEM_ROMD)) {
- /* write access calls the I/O callback */
- te->addr_write = vaddr |
- (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
- } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
- !cpu_physical_memory_is_dirty(pd)) {
- te->addr_write = vaddr | IO_MEM_NOTDIRTY;
- } else {
- te->addr_write = address;
- }
- } else {
- te->addr_write = -1;
- }
+ index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ env->iotlb[mmu_idx][index] = iotlb - vaddr;
+ te = &env->tlb_table[mmu_idx][index];
+ te->addend = addend - vaddr;
+ if (prot & PAGE_READ) {
+ te->addr_read = address;
+ } else {
+ te->addr_read = -1;
}
-#if !defined(CONFIG_SOFTMMU)
- else {
- if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
- /* IO access: no mapping is done as it will be handled by the
- soft MMU */
- if (!(env->hflags & HF_SOFTMMU_MASK))
- ret = 2;
- } else {
- void *map_addr;
- if (vaddr >= MMAP_AREA_END) {
- ret = 2;
- } else {
- if (prot & PROT_WRITE) {
- if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
-#if defined(TARGET_HAS_SMC) || 1
- first_tb ||
-#endif
- ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
- !cpu_physical_memory_is_dirty(pd))) {
- /* ROM: we do as if code was inside */
- /* if code is present, we only map as read only and save the
- original mapping */
- VirtPageDesc *vp;
-
- vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
- vp->phys_addr = pd;
- vp->prot = prot;
- vp->valid_tag = virt_valid_tag;
- prot &= ~PAGE_WRITE;
- }
- }
- map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
- MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
- if (map_addr == MAP_FAILED) {
- cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
- paddr, vaddr);
- }
- }
+ if (prot & PAGE_EXEC) {
+ te->addr_code = code_address;
+ } else {
+ te->addr_code = -1;
+ }
+ if (prot & PAGE_WRITE) {
+ if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
+ (pd & IO_MEM_ROMD)) {
+ /* Write access calls the I/O callback. */
+ te->addr_write = address | TLB_MMIO;
+ } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
+ !cpu_physical_memory_is_dirty(pd)) {
+ te->addr_write = address | TLB_NOTDIRTY;
+ } else {
+ te->addr_write = address;
}
+ } else {
+ te->addr_write = -1;
}
-#endif
return ret;
}
-/* called from signal handler: invalidate the code and unprotect the
- page. Return TRUE if the fault was succesfully handled. */
-int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
-{
-#if !defined(CONFIG_SOFTMMU)
- VirtPageDesc *vp;
-
-#if defined(DEBUG_TLB)
- printf("page_unprotect: addr=0x%08x\n", addr);
-#endif
- addr &= TARGET_PAGE_MASK;
-
- /* if it is not mapped, no need to worry here */
- if (addr >= MMAP_AREA_END)
- return 0;
- vp = virt_page_find(addr >> TARGET_PAGE_BITS);
- if (!vp)
- return 0;
- /* NOTE: in this case, validate_tag is _not_ tested as it
- validates only the code TLB */
- if (vp->valid_tag != virt_valid_tag)
- return 0;
- if (!(vp->prot & PAGE_WRITE))
- return 0;
-#if defined(DEBUG_TLB)
- printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
- addr, vp->phys_addr, vp->prot);
-#endif
- if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
- cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
- (unsigned long)addr, vp->prot);
- /* set the dirty bit */
- phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
- /* flush the code inside */
- tb_invalidate_phys_page(vp->phys_addr, pc, puc);
- return 1;
-#else
- return 0;
-#endif
-}
-
#else
void tlb_flush(CPUState *env, int flush_global)
PageDesc *p;
target_ulong addr;
+ /* mmap_lock should already be held. */
start = start & TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
if (flags & PAGE_WRITE)
flags |= PAGE_WRITE_ORG;
- spin_lock(&tb_lock);
for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
p = page_find_alloc(addr >> TARGET_PAGE_BITS);
+ /* We may be called for host regions that are outside guest
+ address space. */
+ if (!p)
+ return;
/* if the write protection is set, then we invalidate the code
inside */
if (!(p->flags & PAGE_WRITE) &&
}
p->flags = flags;
}
- spin_unlock(&tb_lock);
}
int page_check_range(target_ulong start, target_ulong len, int flags)
target_ulong end;
target_ulong addr;
+ if (start + len < start)
+ /* we've wrapped around */
+ return -1;
+
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
start = start & TARGET_PAGE_MASK;
- if( end < start )
- /* we've wrapped around */
- return -1;
for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
p = page_find(addr >> TARGET_PAGE_BITS);
if( !p )
PageDesc *p, *p1;
target_ulong host_start, host_end, addr;
+ /* Technically this isn't safe inside a signal handler. However we
+ know this only ever happens in a synchronous SEGV handler, so in
+ practice it seems to be ok. */
+ mmap_lock();
+
host_start = address & qemu_host_page_mask;
page_index = host_start >> TARGET_PAGE_BITS;
p1 = page_find(page_index);
- if (!p1)
+ if (!p1) {
+ mmap_unlock();
return 0;
+ }
host_end = host_start + qemu_host_page_size;
p = p1;
prot = 0;
#ifdef DEBUG_TB_CHECK
tb_invalidate_check(address);
#endif
+ mmap_unlock();
return 1;
}
}
+ mmap_unlock();
return 0;
}
}
#endif /* defined(CONFIG_USER_ONLY) */
+#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
ram_addr_t orig_size = size;
void *subpage;
+#ifdef USE_KQEMU
+ /* XXX: should not depend on cpu context */
+ env = first_cpu;
+ if (env->kqemu_enabled) {
+ kqemu_set_phys_mem(start_addr, size, phys_offset);
+ }
+#endif
+ if (kvm_enabled())
+ kvm_set_phys_mem(start_addr, size, phys_offset);
+
size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
end_addr = start_addr + (target_phys_addr_t)size;
for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
{
ram_addr_t addr;
if ((phys_ram_alloc_offset + size) > phys_ram_size) {
- fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
- size, phys_ram_size);
+ fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
+ (uint64_t)size, (uint64_t)phys_ram_size);
abort();
}
addr = phys_ram_alloc_offset;
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#ifdef TARGET_SPARC
- do_unassigned_access(addr, 0, 0, 0);
-#elif TARGET_CRIS
- do_unassigned_access(addr, 0, 0, 0);
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 0, 0, 0, 1);
+#endif
+ return 0;
+}
+
+static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
+{
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
+#endif
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 0, 0, 0, 2);
+#endif
+ return 0;
+}
+
+static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
+{
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
+#endif
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 0, 0, 0, 4);
#endif
return 0;
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#ifdef TARGET_SPARC
- do_unassigned_access(addr, 1, 0, 0);
-#elif TARGET_CRIS
- do_unassigned_access(addr, 1, 0, 0);
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 1, 0, 0, 1);
+#endif
+}
+
+static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
+#endif
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 1, 0, 0, 2);
+#endif
+}
+
+static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
+#endif
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
unassigned_mem_readb,
- unassigned_mem_readb,
- unassigned_mem_readb,
+ unassigned_mem_readw,
+ unassigned_mem_readl,
};
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
unassigned_mem_writeb,
- unassigned_mem_writeb,
- unassigned_mem_writeb,
+ unassigned_mem_writew,
+ unassigned_mem_writel,
};
-static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
+ uint32_t val)
{
- unsigned long ram_addr;
int dirty_flags;
- ram_addr = addr - (unsigned long)phys_ram_base;
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
- stb_p((uint8_t *)(long)addr, val);
+ stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
- tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
+ tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
-static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
+ uint32_t val)
{
- unsigned long ram_addr;
int dirty_flags;
- ram_addr = addr - (unsigned long)phys_ram_base;
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
- stw_p((uint8_t *)(long)addr, val);
+ stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
- tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
+ tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
-static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
+ uint32_t val)
{
- unsigned long ram_addr;
int dirty_flags;
- ram_addr = addr - (unsigned long)phys_ram_base;
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
- stl_p((uint8_t *)(long)addr, val);
+ stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
- tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
+ tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
static CPUReadMemoryFunc *error_mem_read[3] = {
notdirty_mem_writel,
};
-#if defined(CONFIG_SOFTMMU)
+/* Generate a debug exception if a watchpoint has been hit. */
+static void check_watchpoint(int offset, int len_mask, int flags)
+{
+ CPUState *env = cpu_single_env;
+ target_ulong pc, cs_base;
+ TranslationBlock *tb;
+ target_ulong vaddr;
+ CPUWatchpoint *wp;
+ int cpu_flags;
+
+ if (env->watchpoint_hit) {
+ /* We re-entered the check after replacing the TB. Now raise
+         * the debug interrupt so that it will trigger after the
+ * current instruction. */
+ cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
+ return;
+ }
+ vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
+ for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
+ if ((vaddr == (wp->vaddr & len_mask) ||
+ (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
+ wp->flags |= BP_WATCHPOINT_HIT;
+ if (!env->watchpoint_hit) {
+ env->watchpoint_hit = wp;
+ tb = tb_find_pc(env->mem_io_pc);
+ if (!tb) {
+ cpu_abort(env, "check_watchpoint: could not find TB for "
+ "pc=%p", (void *)env->mem_io_pc);
+ }
+ cpu_restore_state(tb, env, env->mem_io_pc, NULL);
+ tb_phys_invalidate(tb, -1);
+ if (wp->flags & BP_STOP_BEFORE_ACCESS) {
+ env->exception_index = EXCP_DEBUG;
+ } else {
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
+ tb_gen_code(env, pc, cs_base, cpu_flags, 1);
+ }
+ cpu_resume_from_signal(env, NULL);
+ }
+ } else {
+ wp->flags &= ~BP_WATCHPOINT_HIT;
+ }
+ }
+}
+
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
so these check for a hit then pass through to the normal out-of-line
phys routines. */
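+/* The len_mask arguments below encode the access size (~0x0 byte,
+   ~0x1 word, ~0x3 long), matching the overlap tests in
+   check_watchpoint() above. */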
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
return ldub_phys(addr);
}
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
return lduw_phys(addr);
}
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
return ldl_phys(addr);
}
-/* Generate a debug exception if a watchpoint has been hit.
- Returns the real physical address of the access. addr will be a host
- address in case of a RAM location. */
-static target_ulong check_watchpoint(target_phys_addr_t addr)
-{
- CPUState *env = cpu_single_env;
- target_ulong watch;
- target_ulong retaddr;
- int i;
-
- retaddr = addr;
- for (i = 0; i < env->nb_watchpoints; i++) {
- watch = env->watchpoint[i].vaddr;
- if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
- retaddr = addr - env->watchpoint[i].addend;
- if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
- cpu_single_env->watchpoint_hit = i + 1;
- cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
- break;
- }
- }
- }
- return retaddr;
-}
-
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- addr = check_watchpoint(addr);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
stb_phys(addr, val);
}
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- addr = check_watchpoint(addr);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
stw_phys(addr, val);
}
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- addr = check_watchpoint(addr);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
stl_phys(addr, val);
}
watch_mem_writew,
watch_mem_writel,
};
-#endif
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
unsigned int len)
cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
io_mem_nb = 5;
-#if defined(CONFIG_SOFTMMU)
- io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
+ io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
watch_mem_write, NULL);
-#endif
/* alloc dirty bits array */
phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
return io_mem_read[io_index >> IO_MEM_SHIFT];
}
+#endif /* !defined(CONFIG_USER_ONLY) */
+
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
} else {
- ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
- (addr & ~TARGET_PAGE_MASK);
+ unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ ptr = phys_ram_base + addr1;
stl_p(ptr, val);
+
+ if (unlikely(in_migration)) {
+ if (!cpu_physical_memory_is_dirty(addr1)) {
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
+ /* set dirty bit */
+ phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
+ (0xff & ~CODE_DIRTY_FLAG);
+ }
+ }
}
}
return 0;
}
+/* in deterministic execution mode, instructions doing device I/Os
+ must be at the end of the TB */
+void cpu_io_recompile(CPUState *env, void *retaddr)
+{
+ TranslationBlock *tb;
+ uint32_t n, cflags;
+ target_ulong pc, cs_base;
+ uint64_t flags;
+
+ tb = tb_find_pc((unsigned long)retaddr);
+ if (!tb) {
+ cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
+ retaddr);
+ }
+ n = env->icount_decr.u16.low + tb->icount;
+ cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
+ /* Calculate how many instructions had been executed before the fault
+ occurred. */
+ n = n - env->icount_decr.u16.low;
+ /* Generate a new TB ending on the I/O insn. */
+ n++;
+ /* On MIPS and SH, delay slot instructions can only be restarted if
+ they were already the first instruction in the TB. If this is not
+ the first instruction in a TB then re-execute the preceding
+ branch. */
+#if defined(TARGET_MIPS)
+ if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
+ env->active_tc.PC -= 4;
+ env->icount_decr.u16.low++;
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ }
+#elif defined(TARGET_SH4)
+ if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
+ && n > 1) {
+ env->pc -= 2;
+ env->icount_decr.u16.low++;
+ env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+ }
+#endif
+ /* This should never happen. */
+ if (n > CF_COUNT_MASK)
+ cpu_abort(env, "TB too big during recompile");
+
+ cflags = n | CF_LAST_IO;
+ pc = tb->pc;
+ cs_base = tb->cs_base;
+ flags = tb->flags;
+ tb_phys_invalidate(tb, -1);
+ /* FIXME: In theory this could raise an exception. In practice
+ we have already translated the block once so it's probably ok. */
+ tb_gen_code(env, pc, cs_base, flags, cflags);
+ /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
+ the first in the TB) then we end up generating a whole new TB and
+ repeating the fault, which is horribly inefficient.
+ Better would be to execute just this insn uncached, or generate a
+ second new TB. */
+ cpu_resume_from_signal(env, NULL);
+}
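+
+/* Expected caller (an assumption, not shown in this patch): the softmmu
+   I/O access helpers pass their host return address here when an I/O
+   access happens with icount active before the TB's final instruction,
+   so the TB is rebuilt with CF_LAST_IO and the access re-executed as
+   its last insn. */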
+
void dump_exec_info(FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
}
/* XXX: avoid using doubles ? */
cpu_fprintf(f, "Translation buffer state:\n");
- cpu_fprintf(f, "TB count %d\n", nb_tbs);
+ cpu_fprintf(f, "gen code size %ld/%ld\n",
+ code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
+ cpu_fprintf(f, "TB count %d/%d\n",
+ nb_tbs, code_gen_max_blocks);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
nb_tbs ? target_code_size / nb_tbs : 0,
max_target_code_size);
cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
-#ifdef CONFIG_PROFILER
- {
- int64_t tot;
- tot = dyngen_interm_time + dyngen_code_time;
- cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
- tot, tot / 2.4e9);
- cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
- dyngen_tb_count,
- dyngen_tb_count1 - dyngen_tb_count,
- dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
- cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
- dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
- cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
- dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
- cpu_fprintf(f, "deleted ops/TB %0.2f\n",
- dyngen_tb_count ?
- (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
- cpu_fprintf(f, "cycles/op %0.1f\n",
- dyngen_op_count ? (double)tot / dyngen_op_count : 0);
- cpu_fprintf(f, "cycles/in byte %0.1f\n",
- dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
- cpu_fprintf(f, "cycles/out byte %0.1f\n",
- dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
- if (tot == 0)
- tot = 1;
- cpu_fprintf(f, " gen_interm time %0.1f%%\n",
- (double)dyngen_interm_time / tot * 100.0);
- cpu_fprintf(f, " gen_code time %0.1f%%\n",
- (double)dyngen_code_time / tot * 100.0);
- cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
- dyngen_restore_count);
- cpu_fprintf(f, " avg cycles %0.1f\n",
- dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
- {
- extern void dump_op_count(void);
- dump_op_count();
- }
- }
-#endif
+ tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)