#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
+#include "osdep.h"
+#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
-TranslationBlock *tbs;
+static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
-int nb_tbs;
+static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
-uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
-uint8_t *code_gen_buffer;
-unsigned long code_gen_buffer_size;
+#if defined(__arm__) || defined(__sparc_v9__)
+/* The prologue must be reachable with a direct jump. ARM and Sparc64
+ have limited branch ranges (possibly also PPC), so place it in a
+ section close to the code segment. */
+#define code_gen_section \
+ __attribute__((__section__(".gen_code"))) \
+ __attribute__((aligned (32)))
+#else
+#define code_gen_section \
+ __attribute__((aligned (32)))
+#endif
+
+uint8_t code_gen_prologue[1024] code_gen_section;
+static uint8_t *code_gen_buffer;
+static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
-unsigned long code_gen_buffer_max_size;
+static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
+static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
-PhysPageDesc **l1_phys_map;
+static PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);
#endif
/* log support */
-char *logfilename = "/tmp/qemu.log";
+static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;
#ifdef _WIN32
{
SYSTEM_INFO system_info;
- DWORD old_protect;
GetSystemInfo(&system_info);
qemu_real_host_page_size = system_info.dwPageSize;
#endif
}
-static inline PageDesc *page_find_alloc(target_ulong index)
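+/* Return the address of the l1_map slot for a page index, or NULL when
+   the index is out of range; shared by page_find_alloc() and page_find(). */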
+static inline PageDesc **page_l1_map(target_ulong index)
{
- PageDesc **lp, *p;
-
#if TARGET_LONG_BITS > 32
/* Host memory outside guest VM. For 32-bit targets we have already
excluded high addresses. */
- if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
+ if (index > ((target_ulong)L2_SIZE * L1_SIZE))
return NULL;
#endif
- lp = &l1_map[index >> L2_BITS];
+ return &l1_map[index >> L2_BITS];
+}
+
+static inline PageDesc *page_find_alloc(target_ulong index)
+{
+ PageDesc **lp, *p;
+ lp = page_l1_map(index);
+ if (!lp)
+ return NULL;
+
p = *lp;
if (!p) {
/* allocate if not found */
static inline PageDesc *page_find(target_ulong index)
{
- PageDesc *p;
+ PageDesc **lp, *p;
+ lp = page_l1_map(index);
+ if (!lp)
+ return NULL;
- p = l1_map[index >> L2_BITS];
+ p = *lp;
if (!p)
return 0;
return p + (index & (L2_SIZE - 1));
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
-void code_gen_alloc(unsigned long tb_size)
+static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
code_gen_buffer = static_code_gen_buffer;
code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
    /* XXX: needs adjustments */
- code_gen_buffer_size = (int)(phys_ram_size / 4);
+ code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
}
if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
#if defined(__linux__)
{
int flags;
+ void *start = NULL;
+
flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
flags |= MAP_32BIT;
/* Cannot map more than that */
if (code_gen_buffer_size > (800 * 1024 * 1024))
code_gen_buffer_size = (800 * 1024 * 1024);
+#elif defined(__sparc_v9__)
+    /* Map the buffer below 2G, so we can use direct calls and branches */
+ flags |= MAP_FIXED;
+ start = (void *) 0x60000000UL;
+ if (code_gen_buffer_size > (512 * 1024 * 1024))
+ code_gen_buffer_size = (512 * 1024 * 1024);
+#endif
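+    /* The translation cache must be mapped readable, writable and
+       executable, since TCG both emits code into it and jumps into it. */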
+ code_gen_buffer = mmap(start, code_gen_buffer_size,
+ PROT_WRITE | PROT_READ | PROT_EXEC,
+ flags, -1, 0);
+ if (code_gen_buffer == MAP_FAILED) {
+ fprintf(stderr, "Could not allocate dynamic translator buffer\n");
+ exit(1);
+ }
+ }
+#elif defined(__FreeBSD__)
+ {
+ int flags;
+ void *addr = NULL;
+ flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#if defined(__x86_64__)
+ /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
+ * 0x40000000 is free */
+ flags |= MAP_FIXED;
+ addr = (void *)0x40000000;
+ /* Cannot map more than that */
+ if (code_gen_buffer_size > (800 * 1024 * 1024))
+ code_gen_buffer_size = (800 * 1024 * 1024);
#endif
- code_gen_buffer = mmap(NULL, code_gen_buffer_size,
+ code_gen_buffer = mmap(addr, code_gen_buffer_size,
PROT_WRITE | PROT_READ | PROT_EXEC,
flags, -1, 0);
if (code_gen_buffer == MAP_FAILED) {
return -EINVAL;
qemu_get_be32s(f, &env->halted);
- qemu_put_be32s(f, &env->interrupt_request);
+ qemu_get_be32s(f, &env->interrupt_request);
tlb_flush(env, 1);
return 0;
cpu_index++;
}
env->cpu_index = cpu_index;
- env->nb_watchpoints = 0;
*penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
}
}
-void tb_jmp_check(TranslationBlock *tb)
+static void tb_jmp_check(TranslationBlock *tb)
{
TranslationBlock *tb1;
unsigned int n1;
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
int is_cpu_write_access)
{
- int n, current_tb_modified, current_tb_not_found, current_flags;
+ TranslationBlock *tb, *tb_next, *saved_tb;
CPUState *env = cpu_single_env;
- PageDesc *p;
- TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
target_ulong tb_start, tb_end;
- target_ulong current_pc, current_cs_base;
+ PageDesc *p;
+ int n;
+#ifdef TARGET_HAS_PRECISE_SMC
+ int current_tb_not_found = is_cpu_write_access;
+ TranslationBlock *current_tb = NULL;
+ int current_tb_modified = 0;
+ target_ulong current_pc = 0;
+ target_ulong current_cs_base = 0;
+ int current_flags = 0;
+#endif /* TARGET_HAS_PRECISE_SMC */
p = page_find(start >> TARGET_PAGE_BITS);
if (!p)
/* we remove all the TBs in the range [start, end[ */
/* XXX: see if in some cases it could be faster to invalidate all the code */
- current_tb_not_found = is_cpu_write_access;
- current_tb_modified = 0;
- current_tb = NULL; /* avoid warning */
- current_pc = 0; /* avoid warning */
- current_cs_base = 0; /* avoid warning */
- current_flags = 0; /* avoid warning */
tb = p->first_tb;
while (tb != NULL) {
n = (long)tb & 3;
current_tb_modified = 1;
cpu_restore_state(current_tb, env,
env->mem_io_pc, NULL);
-#if defined(TARGET_I386)
- current_flags = env->hflags;
- current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
- current_cs_base = (target_ulong)env->segs[R_CS].base;
- current_pc = current_cs_base + env->eip;
-#else
-#error unsupported CPU
-#endif
+            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
+                                 &current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
/* we need to do that to handle the case where a signal
static void tb_invalidate_phys_page(target_phys_addr_t addr,
unsigned long pc, void *puc)
{
- int n, current_flags, current_tb_modified;
- target_ulong current_pc, current_cs_base;
+ TranslationBlock *tb;
PageDesc *p;
- TranslationBlock *tb, *current_tb;
+ int n;
#ifdef TARGET_HAS_PRECISE_SMC
+ TranslationBlock *current_tb = NULL;
CPUState *env = cpu_single_env;
+ int current_tb_modified = 0;
+ target_ulong current_pc = 0;
+ target_ulong current_cs_base = 0;
+ int current_flags = 0;
#endif
addr &= TARGET_PAGE_MASK;
if (!p)
return;
tb = p->first_tb;
- current_tb_modified = 0;
- current_tb = NULL;
- current_pc = 0; /* avoid warning */
- current_cs_base = 0; /* avoid warning */
- current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
if (tb && pc != 0) {
current_tb = tb_find_pc(pc);
current_tb_modified = 1;
cpu_restore_state(current_tb, env, pc, puc);
-#if defined(TARGET_I386)
- current_flags = env->hflags;
- current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
- current_cs_base = (target_ulong)env->segs[R_CS].base;
- current_pc = current_cs_base + env->eip;
-#else
-#error unsupported CPU
-#endif
+            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
+                                 &current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate(tb, addr);
#endif
/* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
+int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+ int flags, CPUWatchpoint **watchpoint)
{
- int i;
+ target_ulong len_mask = ~(len - 1);
+ CPUWatchpoint *wp, *prev_wp;
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (addr == env->watchpoint[i].vaddr)
- return 0;
+ /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
+ if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
+ fprintf(stderr, "qemu: tried to set invalid watchpoint at "
+ TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
+ return -EINVAL;
}
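+    /* For example, len == 4 gives len_mask == ~3, so a 4-byte watchpoint
+       at an address like 0x1002 would be rejected as unaligned. */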
- if (env->nb_watchpoints >= MAX_WATCHPOINTS)
- return -1;
+ wp = qemu_malloc(sizeof(*wp));
+ if (!wp)
+ return -ENOMEM;
+
+ wp->vaddr = addr;
+ wp->len_mask = len_mask;
+ wp->flags = flags;
+
+ /* keep all GDB-injected watchpoints in front */
+ if (!(flags & BP_GDB) && env->watchpoints) {
+ prev_wp = env->watchpoints;
+ while (prev_wp->next != NULL && (prev_wp->next->flags & BP_GDB))
+ prev_wp = prev_wp->next;
+ } else {
+ prev_wp = NULL;
+ }
+
+ /* Insert new watchpoint */
+ if (prev_wp) {
+ wp->next = prev_wp->next;
+ prev_wp->next = wp;
+ } else {
+ wp->next = env->watchpoints;
+ env->watchpoints = wp;
+ }
+ if (wp->next)
+ wp->next->prev = wp;
+ wp->prev = prev_wp;
- i = env->nb_watchpoints++;
- env->watchpoint[i].vaddr = addr;
- env->watchpoint[i].type = type;
tlb_flush_page(env, addr);
- /* FIXME: This flush is needed because of the hack to make memory ops
- terminate the TB. It can be removed once the proper IO trap and
- re-execute bits are in. */
- tb_flush(env);
- return i;
+
+ if (watchpoint)
+ *watchpoint = wp;
+ return 0;
}
-/* Remove a watchpoint. */
-int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
+/* Remove a specific watchpoint. */
+int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
+ int flags)
{
- int i;
+ target_ulong len_mask = ~(len - 1);
+ CPUWatchpoint *wp;
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (addr == env->watchpoint[i].vaddr) {
- env->nb_watchpoints--;
- env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
- tlb_flush_page(env, addr);
+ for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
+ if (addr == wp->vaddr && len_mask == wp->len_mask
+ && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
+ cpu_watchpoint_remove_by_ref(env, wp);
return 0;
}
}
- return -1;
+ return -ENOENT;
}
-/* Remove all watchpoints. */
-void cpu_watchpoint_remove_all(CPUState *env) {
- int i;
+/* Remove a specific watchpoint by reference. */
+void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
+{
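+    /* Unlink the watchpoint from the doubly-linked list, updating the
+       list head when it was the first entry. */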
+ if (watchpoint->next)
+ watchpoint->next->prev = watchpoint->prev;
+ if (watchpoint->prev)
+ watchpoint->prev->next = watchpoint->next;
+ else
+ env->watchpoints = watchpoint->next;
- for (i = 0; i < env->nb_watchpoints; i++) {
- tlb_flush_page(env, env->watchpoint[i].vaddr);
- }
- env->nb_watchpoints = 0;
+ tlb_flush_page(env, watchpoint->vaddr);
+
+ qemu_free(watchpoint);
}
-/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
- breakpoint is reached */
-int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
+/* Remove all matching watchpoints. */
+void cpu_watchpoint_remove_all(CPUState *env, int mask)
+{
+    CPUWatchpoint *wp, *next;
+
+    /* Fetch the next pointer up front: removing the watchpoint frees it. */
+    for (wp = env->watchpoints; wp != NULL; wp = next) {
+        next = wp->next;
+        if (wp->flags & mask)
+            cpu_watchpoint_remove_by_ref(env, wp);
+    }
+}
+
+/* Add a breakpoint. */
+int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
+ CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
- int i;
+ CPUBreakpoint *bp, *prev_bp;
- for(i = 0; i < env->nb_breakpoints; i++) {
- if (env->breakpoints[i] == pc)
- return 0;
+ bp = qemu_malloc(sizeof(*bp));
+ if (!bp)
+ return -ENOMEM;
+
+ bp->pc = pc;
+ bp->flags = flags;
+
+ /* keep all GDB-injected breakpoints in front */
+ if (!(flags & BP_GDB) && env->breakpoints) {
+ prev_bp = env->breakpoints;
+ while (prev_bp->next != NULL && (prev_bp->next->flags & BP_GDB))
+ prev_bp = prev_bp->next;
+ } else {
+ prev_bp = NULL;
}
- if (env->nb_breakpoints >= MAX_BREAKPOINTS)
- return -1;
- env->breakpoints[env->nb_breakpoints++] = pc;
+ /* Insert new breakpoint */
+ if (prev_bp) {
+ bp->next = prev_bp->next;
+ prev_bp->next = bp;
+ } else {
+ bp->next = env->breakpoints;
+ env->breakpoints = bp;
+ }
+ if (bp->next)
+ bp->next->prev = bp;
+ bp->prev = prev_bp;
breakpoint_invalidate(env, pc);
+
+ if (breakpoint)
+ *breakpoint = bp;
return 0;
#else
- return -1;
+ return -ENOSYS;
#endif
}
-/* remove all breakpoints */
-void cpu_breakpoint_remove_all(CPUState *env) {
+/* Remove a specific breakpoint. */
+int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
+{
#if defined(TARGET_HAS_ICE)
- int i;
- for(i = 0; i < env->nb_breakpoints; i++) {
- breakpoint_invalidate(env, env->breakpoints[i]);
+ CPUBreakpoint *bp;
+
+ for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
+ if (bp->pc == pc && bp->flags == flags) {
+ cpu_breakpoint_remove_by_ref(env, bp);
+ return 0;
+ }
}
- env->nb_breakpoints = 0;
+ return -ENOENT;
+#else
+ return -ENOSYS;
#endif
}
-/* remove a breakpoint */
-int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
+/* Remove a specific breakpoint by reference. */
+void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
- int i;
- for(i = 0; i < env->nb_breakpoints; i++) {
- if (env->breakpoints[i] == pc)
- goto found;
- }
- return -1;
- found:
- env->nb_breakpoints--;
- if (i < env->nb_breakpoints)
- env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
+ if (breakpoint->next)
+ breakpoint->next->prev = breakpoint->prev;
+ if (breakpoint->prev)
+ breakpoint->prev->next = breakpoint->next;
+ else
+ env->breakpoints = breakpoint->next;
- breakpoint_invalidate(env, pc);
- return 0;
-#else
- return -1;
+ breakpoint_invalidate(env, breakpoint->pc);
+
+ qemu_free(breakpoint);
+#endif
+}
+
+/* Remove all matching breakpoints. */
+void cpu_breakpoint_remove_all(CPUState *env, int mask)
+{
+#if defined(TARGET_HAS_ICE)
+    CPUBreakpoint *bp, *next;
+
+    /* Fetch the next pointer up front: removing the breakpoint frees it. */
+    for (bp = env->breakpoints; bp != NULL; bp = next) {
+        next = bp->next;
+        if (bp->flags & mask)
+            cpu_breakpoint_remove_by_ref(env, bp);
+    }
#endif
}
#if !defined(CONFIG_SOFTMMU)
/* must avoid mmap() usage of glibc by setting a buffer "by hand" */
{
- static uint8_t logfile_buf[4096];
+ static char logfile_buf[4096];
setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
}
#else
signals are used primarily to interrupt blocking syscalls. */
#else
if (use_icount) {
- env->icount_decr.u16.high = 0x8000;
+ env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
/* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
an async event happened and we need to process it. */
env->interrupt_request &= ~mask;
}
-CPULogItem cpu_log_items[] = {
+const CPULogItem cpu_log_items[] = {
{ CPU_LOG_TB_OUT_ASM, "out_asm",
"show generated host assembly code for each compiled TB" },
{ CPU_LOG_TB_IN_ASM, "in_asm",
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
- CPULogItem *item;
+ const CPULogItem *item;
int mask;
const char *p, *p1;
}
}
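+
+/* Record whether dirty-page tracking is active (presumably driven by the
+   live-migration code); the stl write path at the end of this patch keeps
+   the dirty bitmap up to date while tracking is enabled. */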
+int cpu_physical_memory_set_dirty_tracking(int enable)
+{
+ in_migration = enable;
+ return 0;
+}
+
+int cpu_physical_memory_get_dirty_tracking(void)
+{
+ return in_migration;
+}
+
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
ram_addr_t ram_addr;
target_phys_addr_t addend;
int ret;
CPUTLBEntry *te;
- int i;
+ CPUWatchpoint *wp;
target_phys_addr_t iotlb;
p = phys_page_find(paddr >> TARGET_PAGE_BITS);
code_address = address;
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
+ for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
+ if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
iotlb = io_mem_watch + paddr;
/* TODO: The memory case can be optimized by not trapping
reads of pages with a write breakpoint. */
target_ulong end;
target_ulong addr;
+ if (start + len < start)
+ /* we've wrapped around */
+ return -1;
+
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
start = start & TARGET_PAGE_MASK;
- if( end < start )
- /* we've wrapped around */
- return -1;
for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
p = page_find(addr >> TARGET_PAGE_BITS);
if( !p )
kqemu_set_phys_mem(start_addr, size, phys_offset);
}
#endif
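+    /* Propagate the new region to the KVM kernel module as well. */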
+ if (kvm_enabled())
+ kvm_set_phys_mem(start_addr, size, phys_offset);
+
size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
end_addr = start_addr + (target_phys_addr_t)size;
for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
{
ram_addr_t addr;
if ((phys_ram_alloc_offset + size) > phys_ram_size) {
- fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 "\n",
+ fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
(uint64_t)size, (uint64_t)phys_ram_size);
abort();
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#ifdef TARGET_SPARC
- do_unassigned_access(addr, 0, 0, 0);
-#elif TARGET_CRIS
- do_unassigned_access(addr, 0, 0, 0);
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 0, 0, 0, 1);
+#endif
+ return 0;
+}
+
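+/* The new trailing argument to do_unassigned_access() is the access size:
+   1, 2 or 4 bytes for the b/w/l variants. */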
+static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
+{
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
+#endif
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 0, 0, 0, 2);
+#endif
+ return 0;
+}
+
+static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
+{
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
+#endif
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 0, 0, 0, 4);
#endif
return 0;
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#ifdef TARGET_SPARC
- do_unassigned_access(addr, 1, 0, 0);
-#elif TARGET_CRIS
- do_unassigned_access(addr, 1, 0, 0);
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 1, 0, 0, 1);
+#endif
+}
+
+static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
+#endif
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 1, 0, 0, 2);
+#endif
+}
+
+static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+{
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
+#endif
+#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+ do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
unassigned_mem_readb,
- unassigned_mem_readb,
- unassigned_mem_readb,
+ unassigned_mem_readw,
+ unassigned_mem_readl,
};
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
unassigned_mem_writeb,
- unassigned_mem_writeb,
- unassigned_mem_writeb,
+ unassigned_mem_writew,
+ unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
};
/* Generate a debug exception if a watchpoint has been hit. */
-static void check_watchpoint(int offset, int flags)
+static void check_watchpoint(int offset, int len_mask, int flags)
{
CPUState *env = cpu_single_env;
+ target_ulong pc, cs_base;
+ TranslationBlock *tb;
target_ulong vaddr;
- int i;
-
+ CPUWatchpoint *wp;
+ int cpu_flags;
+
+ if (env->watchpoint_hit) {
+ /* We re-entered the check after replacing the TB. Now raise
+     * the debug interrupt so that it will trigger after the
+ * current instruction. */
+ cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
+ return;
+ }
vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (vaddr == env->watchpoint[i].vaddr
- && (env->watchpoint[i].type & flags)) {
- env->watchpoint_hit = i + 1;
- cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
- break;
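+    /* A watchpoint hits when the access and the watched range overlap:
+       either the access covers the (aligned) watchpoint address, or the
+       watchpoint range covers the access address. */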
+ for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
+ if ((vaddr == (wp->vaddr & len_mask) ||
+ (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
+ wp->flags |= BP_WATCHPOINT_HIT;
+ if (!env->watchpoint_hit) {
+ env->watchpoint_hit = wp;
+ tb = tb_find_pc(env->mem_io_pc);
+ if (!tb) {
+ cpu_abort(env, "check_watchpoint: could not find TB for "
+ "pc=%p", (void *)env->mem_io_pc);
+ }
+ cpu_restore_state(tb, env, env->mem_io_pc, NULL);
+ tb_phys_invalidate(tb, -1);
+ if (wp->flags & BP_STOP_BEFORE_ACCESS) {
+ env->exception_index = EXCP_DEBUG;
+ } else {
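+                /* Otherwise regenerate the current TB (cflags == 1, which
+                   as far as I can tell requests a single-instruction TB) so
+                   the access re-executes and the re-entry check above
+                   raises the debug interrupt. */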
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
+ tb_gen_code(env, pc, cs_base, cpu_flags, 1);
+ }
+ cpu_resume_from_signal(env, NULL);
+ }
+ } else {
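+            /* Clear any stale hit flag on watchpoints that did not match. */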
+ wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
}
phys routines. */
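+/* The len_mask argument encodes the access width: ~0x0, ~0x1 and ~0x3
+   for the 1-, 2- and 4-byte variants respectively. */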
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
return ldub_phys(addr);
}
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
return lduw_phys(addr);
}
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
return ldl_phys(addr);
}
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
stb_phys(addr, val);
}
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
stw_phys(addr, val);
}
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
stl_phys(addr, val);
}
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
} else {
- ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
- (addr & ~TARGET_PAGE_MASK);
+ unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ ptr = phys_ram_base + addr1;
stl_p(ptr, val);
+
+ if (unlikely(in_migration)) {
+ if (!cpu_physical_memory_is_dirty(addr1)) {
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
+ /* set dirty bit */
+ phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
+ (0xff & ~CODE_DIRTY_FLAG);
+ }
+ }
}
}