*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
*/
#include "config.h"
#ifdef _WIN32
-#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#define SMC_BITMAP_USE_THRESHOLD 10
-#define MMAP_AREA_START 0x00000000
-#define MMAP_AREA_END 0xa8000000
-
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
typedef struct PhysPageDesc {
/* offset in host memory of the page + io_index in the low bits */
ram_addr_t phys_offset;
+ ram_addr_t region_offset;
} PhysPageDesc;
#define L2_BITS 10
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
-static int io_mem_nb;
+static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
void *opaque[TARGET_PAGE_SIZE][2][4];
+ ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
#ifdef _WIN32
{
/* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
- qemu_real_host_page_size = qemu_getpagesize();
+#ifdef _WIN32
+ {
+ SYSTEM_INFO system_info;
+
+ GetSystemInfo(&system_info);
+ qemu_real_host_page_size = system_info.dwPageSize;
+ }
+#else
+ qemu_real_host_page_size = getpagesize();
+#endif
if (qemu_host_page_size == 0)
qemu_host_page_size = qemu_real_host_page_size;
if (qemu_host_page_size < TARGET_PAGE_SIZE)
if (!p) {
/* allocate if not found */
#if defined(CONFIG_USER_ONLY)
- unsigned long addr;
size_t len = sizeof(PageDesc) * L2_SIZE;
/* Don't use qemu_malloc because it may recurse. */
p = mmap(0, len, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
*lp = p;
- addr = h2g(p);
- if (addr == (target_ulong)addr) {
+ if (h2g_valid(p)) {
+ unsigned long addr = h2g(p);
page_set_flags(addr & TARGET_PAGE_MASK,
TARGET_PAGE_ALIGN(addr + len),
PAGE_RESERVED);
return NULL;
pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
*lp = pd;
- for (i = 0; i < L2_SIZE; i++)
+ for (i = 0; i < L2_SIZE; i++) {
pd[i].phys_offset = IO_MEM_UNASSIGNED;
+ pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
+ }
}
return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
start = (void *) 0x60000000UL;
if (code_gen_buffer_size > (512 * 1024 * 1024))
code_gen_buffer_size = (512 * 1024 * 1024);
+#elif defined(__arm__)
+ /* Map the buffer below 32M, so we can use direct calls and branches */
+ flags |= MAP_FIXED;
+ start = (void *) 0x01000000UL;
+ if (code_gen_buffer_size > 16 * 1024 * 1024)
+ code_gen_buffer_size = 16 * 1024 * 1024;
#endif
code_gen_buffer = mmap(start, code_gen_buffer_size,
PROT_WRITE | PROT_READ | PROT_EXEC,
exit(1);
}
}
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
{
int flags;
void *addr = NULL;
}
#else
code_gen_buffer = qemu_malloc(code_gen_buffer_size);
- if (!code_gen_buffer) {
- fprintf(stderr, "Could not allocate dynamic translator buffer\n");
- exit(1);
- }
map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
qemu_get_be32s(f, &env->halted);
qemu_get_be32s(f, &env->interrupt_request);
+ /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
+ version_id is increased. */
+ env->interrupt_request &= ~0x01;
tlb_flush(env, 1);
return 0;
CPUState **penv;
int cpu_index;
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_lock();
+#endif
env->next_cpu = NULL;
penv = &first_cpu;
cpu_index = 0;
cpu_index++;
}
env->cpu_index = cpu_index;
- env->nb_watchpoints = 0;
+ TAILQ_INIT(&env->breakpoints);
+ TAILQ_INIT(&env->watchpoints);
*penv = env;
+#if defined(CONFIG_USER_ONLY)
+ cpu_list_unlock();
+#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
cpu_common_save, cpu_common_load, env);
TranslationBlock *tb;
p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
- if (!p->code_bitmap)
- return;
tb = p->first_tb;
while (tb != NULL) {
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
int is_cpu_write_access)
{
- int n, current_tb_modified, current_tb_not_found, current_flags;
+ TranslationBlock *tb, *tb_next, *saved_tb;
CPUState *env = cpu_single_env;
- PageDesc *p;
- TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
target_ulong tb_start, tb_end;
- target_ulong current_pc, current_cs_base;
+ PageDesc *p;
+ int n;
+#ifdef TARGET_HAS_PRECISE_SMC
+ int current_tb_not_found = is_cpu_write_access;
+ TranslationBlock *current_tb = NULL;
+ int current_tb_modified = 0;
+ target_ulong current_pc = 0;
+ target_ulong current_cs_base = 0;
+ int current_flags = 0;
+#endif /* TARGET_HAS_PRECISE_SMC */
p = page_find(start >> TARGET_PAGE_BITS);
if (!p)
/* we remove all the TBs in the range [start, end[ */
/* XXX: see if in some cases it could be faster to invalidate all the code */
- current_tb_not_found = is_cpu_write_access;
- current_tb_modified = 0;
- current_tb = NULL; /* avoid warning */
- current_pc = 0; /* avoid warning */
- current_cs_base = 0; /* avoid warning */
- current_flags = 0; /* avoid warning */
tb = p->first_tb;
while (tb != NULL) {
n = (long)tb & 3;
current_tb_modified = 1;
cpu_restore_state(current_tb, env,
env->mem_io_pc, NULL);
-#if defined(TARGET_I386)
- current_flags = env->hflags;
- current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
- current_cs_base = (target_ulong)env->segs[R_CS].base;
- current_pc = current_cs_base + env->eip;
-#else
-#error unsupported CPU
-#endif
+                    cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
+                                         &current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
/* we need to do that to handle the case where a signal
int offset, b;
#if 0
if (1) {
- if (loglevel) {
- fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
- cpu_single_env->mem_io_vaddr, len,
- cpu_single_env->eip,
- cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
- }
+ qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
+ cpu_single_env->mem_io_vaddr, len,
+ cpu_single_env->eip,
+ cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
}
#endif
p = page_find(start >> TARGET_PAGE_BITS);
static void tb_invalidate_phys_page(target_phys_addr_t addr,
unsigned long pc, void *puc)
{
- int n, current_flags, current_tb_modified;
- target_ulong current_pc, current_cs_base;
+ TranslationBlock *tb;
PageDesc *p;
- TranslationBlock *tb, *current_tb;
+ int n;
#ifdef TARGET_HAS_PRECISE_SMC
+ TranslationBlock *current_tb = NULL;
CPUState *env = cpu_single_env;
+ int current_tb_modified = 0;
+ target_ulong current_pc = 0;
+ target_ulong current_cs_base = 0;
+ int current_flags = 0;
#endif
addr &= TARGET_PAGE_MASK;
if (!p)
return;
tb = p->first_tb;
- current_tb_modified = 0;
- current_tb = NULL;
- current_pc = 0; /* avoid warning */
- current_cs_base = 0; /* avoid warning */
- current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
if (tb && pc != 0) {
current_tb = tb_find_pc(pc);
current_tb_modified = 1;
cpu_restore_state(current_tb, env, pc, puc);
-#if defined(TARGET_I386)
- current_flags = env->hflags;
- current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
- current_cs_base = (target_ulong)env->segs[R_CS].base;
- current_pc = current_cs_base + env->eip;
-#else
-#error unsupported CPU
-#endif
+            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
+                                 &current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
tb_phys_invalidate(tb, addr);
#endif
/* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
+int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+ int flags, CPUWatchpoint **watchpoint)
{
- int i;
+ target_ulong len_mask = ~(len - 1);
+ CPUWatchpoint *wp;
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (addr == env->watchpoint[i].vaddr)
- return 0;
+ /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
+ if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
+ fprintf(stderr, "qemu: tried to set invalid watchpoint at "
+ TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
+ return -EINVAL;
}
- if (env->nb_watchpoints >= MAX_WATCHPOINTS)
- return -1;
+ wp = qemu_malloc(sizeof(*wp));
+
+ wp->vaddr = addr;
+ wp->len_mask = len_mask;
+ wp->flags = flags;
+
+ /* keep all GDB-injected watchpoints in front */
+ if (flags & BP_GDB)
+ TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
+ else
+ TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
- i = env->nb_watchpoints++;
- env->watchpoint[i].vaddr = addr;
- env->watchpoint[i].type = type;
tlb_flush_page(env, addr);
- /* FIXME: This flush is needed because of the hack to make memory ops
- terminate the TB. It can be removed once the proper IO trap and
- re-execute bits are in. */
- tb_flush(env);
- return i;
+
+ if (watchpoint)
+ *watchpoint = wp;
+ return 0;
}
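+
+/* Usage sketch (illustrative only; env, addr and the error handling are
+   assumptions, not part of this change).  A debugger front end sets a
+   4-byte write watchpoint and later drops it by reference:
+
+       CPUWatchpoint *wp;
+       if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0)
+           return;   /* len must be a power of two, addr aligned to it */
+       ...
+       cpu_watchpoint_remove_by_ref(env, wp);
+ */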
-/* Remove a watchpoint. */
-int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
+/* Remove a specific watchpoint. */
+int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
+ int flags)
{
- int i;
+ target_ulong len_mask = ~(len - 1);
+ CPUWatchpoint *wp;
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (addr == env->watchpoint[i].vaddr) {
- env->nb_watchpoints--;
- env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
- tlb_flush_page(env, addr);
+ TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ if (addr == wp->vaddr && len_mask == wp->len_mask
+ && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
+ cpu_watchpoint_remove_by_ref(env, wp);
return 0;
}
}
- return -1;
+ return -ENOENT;
}
-/* Remove all watchpoints. */
-void cpu_watchpoint_remove_all(CPUState *env) {
- int i;
+/* Remove a specific watchpoint by reference. */
+void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
+{
+ TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
+
+ tlb_flush_page(env, watchpoint->vaddr);
+
+ qemu_free(watchpoint);
+}
+
+/* Remove all matching watchpoints. */
+void cpu_watchpoint_remove_all(CPUState *env, int mask)
+{
+ CPUWatchpoint *wp, *next;
- for (i = 0; i < env->nb_watchpoints; i++) {
- tlb_flush_page(env, env->watchpoint[i].vaddr);
+ TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
+ if (wp->flags & mask)
+ cpu_watchpoint_remove_by_ref(env, wp);
}
- env->nb_watchpoints = 0;
}
-/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
- breakpoint is reached */
-int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
+/* Add a breakpoint. */
+int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
+ CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
- int i;
+ CPUBreakpoint *bp;
- for(i = 0; i < env->nb_breakpoints; i++) {
- if (env->breakpoints[i] == pc)
- return 0;
- }
+ bp = qemu_malloc(sizeof(*bp));
- if (env->nb_breakpoints >= MAX_BREAKPOINTS)
- return -1;
- env->breakpoints[env->nb_breakpoints++] = pc;
+ bp->pc = pc;
+ bp->flags = flags;
+
+ /* keep all GDB-injected breakpoints in front */
+ if (flags & BP_GDB)
+ TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
+ else
+ TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
breakpoint_invalidate(env, pc);
+
+ if (breakpoint)
+ *breakpoint = bp;
return 0;
#else
- return -1;
+ return -ENOSYS;
#endif
}
-/* remove all breakpoints */
-void cpu_breakpoint_remove_all(CPUState *env) {
+/* Remove a specific breakpoint. */
+int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
+{
#if defined(TARGET_HAS_ICE)
- int i;
- for(i = 0; i < env->nb_breakpoints; i++) {
- breakpoint_invalidate(env, env->breakpoints[i]);
+ CPUBreakpoint *bp;
+
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (bp->pc == pc && bp->flags == flags) {
+ cpu_breakpoint_remove_by_ref(env, bp);
+ return 0;
+ }
}
- env->nb_breakpoints = 0;
+ return -ENOENT;
+#else
+ return -ENOSYS;
#endif
}
-/* remove a breakpoint */
-int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
+/* Remove a specific breakpoint by reference. */
+void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
- int i;
- for(i = 0; i < env->nb_breakpoints; i++) {
- if (env->breakpoints[i] == pc)
- goto found;
- }
- return -1;
- found:
- env->nb_breakpoints--;
- if (i < env->nb_breakpoints)
- env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
+ TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
- breakpoint_invalidate(env, pc);
- return 0;
-#else
- return -1;
+ breakpoint_invalidate(env, breakpoint->pc);
+
+ qemu_free(breakpoint);
+#endif
+}
+
+/* Remove all matching breakpoints. */
+void cpu_breakpoint_remove_all(CPUState *env, int mask)
+{
+#if defined(TARGET_HAS_ICE)
+ CPUBreakpoint *bp, *next;
+
+ TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
+ if (bp->flags & mask)
+ cpu_breakpoint_remove_by_ref(env, bp);
+ }
#endif
}
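+
+/* Usage sketch (illustrative; the pc value is an assumption): gdbstub-style
+   callers can now remove only the breakpoints they injected, leaving any
+   BP_CPU breakpoints owned by the CPU model in place:
+
+       cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
+       ...
+       cpu_breakpoint_remove_all(env, BP_GDB);
+ */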
#if defined(TARGET_HAS_ICE)
if (env->singlestep_enabled != enabled) {
env->singlestep_enabled = enabled;
- /* must flush all the translated code to avoid inconsistancies */
- /* XXX: only flush what is necessary */
- tb_flush(env);
+ if (kvm_enabled())
+ kvm_update_guest_debug(env, 0);
+ else {
+            /* must flush all the translated code to avoid inconsistencies */
+ /* XXX: only flush what is necessary */
+ tb_flush(env);
+ }
}
#endif
}
cpu_set_log(loglevel);
}
-/* mask must never be zero, except for A20 change call */
-void cpu_interrupt(CPUState *env, int mask)
+static void cpu_unlink_tb(CPUState *env)
{
-#if !defined(USE_NPTL)
+#if defined(USE_NPTL)
+ /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
+ problem and hope the cpu will stop of its own accord. For userspace
+ emulation this often isn't actually as bad as it sounds. Often
+ signals are used primarily to interrupt blocking syscalls. */
+#else
TranslationBlock *tb;
static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
+
+ tb = env->current_tb;
+ /* if the cpu is currently executing code, we must unlink it and
+ all the potentially executing TB */
+ if (tb && !testandset(&interrupt_lock)) {
+ env->current_tb = NULL;
+ tb_reset_jump_recursive(tb);
+ resetlock(&interrupt_lock);
+ }
#endif
+}
+
+/* mask must never be zero, except for A20 change call */
+void cpu_interrupt(CPUState *env, int mask)
+{
int old_mask;
old_mask = env->interrupt_request;
- /* FIXME: This is probably not threadsafe. A different thread could
- be in the middle of a read-modify-write operation. */
env->interrupt_request |= mask;
-#if defined(USE_NPTL)
- /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
- problem and hope the cpu will stop of its own accord. For userspace
- emulation this often isn't actually as bad as it sounds. Often
- signals are used primarily to interrupt blocking syscalls. */
-#else
+
if (use_icount) {
env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
- /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
- an async event happened and we need to process it. */
if (!can_do_io(env)
- && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
+ && (mask & ~old_mask) != 0) {
cpu_abort(env, "Raised interrupt while not in I/O function");
}
#endif
} else {
- tb = env->current_tb;
- /* if the cpu is currently executing code, we must unlink it and
- all the potentially executing TB */
- if (tb && !testandset(&interrupt_lock)) {
- env->current_tb = NULL;
- tb_reset_jump_recursive(tb);
- resetlock(&interrupt_lock);
- }
+ cpu_unlink_tb(env);
}
-#endif
}
void cpu_reset_interrupt(CPUState *env, int mask)
env->interrupt_request &= ~mask;
}
+void cpu_exit(CPUState *env)
+{
+ env->exit_request = 1;
+ cpu_unlink_tb(env);
+}
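+/* Usage sketch (illustrative; env is whichever CPU the caller holds): device
+   and main-loop code raise guest interrupts with cpu_interrupt(), while code
+   that only needs the CPU to leave the translated-code loop uses cpu_exit()
+   instead of the old CPU_INTERRUPT_EXIT bit:
+
+       cpu_interrupt(env, CPU_INTERRUPT_HARD);   /* deliver an IRQ */
+       cpu_exit(env);                            /* just break out of the loop */
+ */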
+
const CPULogItem cpu_log_items[] = {
{ CPU_LOG_TB_OUT_ASM, "out_asm",
"show generated host assembly code for each compiled TB" },
#ifdef TARGET_I386
{ CPU_LOG_PCALL, "pcall",
"show protected mode far calls/returns/exceptions" },
+ { CPU_LOG_RESET, "cpu_reset",
+ "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
{ CPU_LOG_IOPORT, "ioport",
#else
cpu_dump_state(env, stderr, fprintf, 0);
#endif
- if (logfile) {
- fprintf(logfile, "qemu: fatal: ");
- vfprintf(logfile, fmt, ap2);
- fprintf(logfile, "\n");
+ if (qemu_log_enabled()) {
+ qemu_log("qemu: fatal: ");
+ qemu_log_vprintf(fmt, ap2);
+ qemu_log("\n");
#ifdef TARGET_I386
- cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
+ log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
- cpu_dump_state(env, logfile, fprintf, 0);
+ log_cpu_state(env, 0);
#endif
- fflush(logfile);
- fclose(logfile);
+ qemu_log_flush();
+ qemu_log_close();
}
va_end(ap2);
va_end(ap);
CPUState *cpu_copy(CPUState *env)
{
CPUState *new_env = cpu_init(env->cpu_model_str);
- /* preserve chaining and index */
CPUState *next_cpu = new_env->next_cpu;
int cpu_index = new_env->cpu_index;
+#if defined(TARGET_HAS_ICE)
+ CPUBreakpoint *bp;
+ CPUWatchpoint *wp;
+#endif
+
memcpy(new_env, env, sizeof(CPUState));
+
+ /* Preserve chaining and index. */
new_env->next_cpu = next_cpu;
new_env->cpu_index = cpu_index;
+
+ /* Clone all break/watchpoints.
+ Note: Once we support ptrace with hw-debug register access, make sure
+ BP_CPU break/watchpoints are handled correctly on clone. */
+    TAILQ_INIT(&new_env->breakpoints);
+    TAILQ_INIT(&new_env->watchpoints);
+#if defined(TARGET_HAS_ICE)
+ TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
+ }
+ TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
+ wp->flags, NULL);
+ }
+#endif
+
return new_env;
}
env->tlb_table[2][i].addr_read = -1;
env->tlb_table[2][i].addr_write = -1;
env->tlb_table[2][i].addr_code = -1;
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
env->tlb_table[3][i].addr_read = -1;
env->tlb_table[3][i].addr_write = -1;
env->tlb_table[3][i].addr_code = -1;
#endif
+#if (NB_MMU_MODES >= 5)
+ env->tlb_table[4][i].addr_read = -1;
+ env->tlb_table[4][i].addr_write = -1;
+ env->tlb_table[4][i].addr_code = -1;
#endif
+
}
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
tlb_flush_entry(&env->tlb_table[2][i], addr);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
+#if (NB_MMU_MODES >= 5)
+ tlb_flush_entry(&env->tlb_table[4][i], addr);
#endif
tlb_flush_jmp_cache(env, addr);
}
}
+/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
int dirty_flags)
{
/* we modify the TLB cache so that the dirty bit will be set again
when accessing the range */
- start1 = start + (unsigned long)phys_ram_base;
+ start1 = (unsigned long)qemu_get_ram_ptr(start);
+    /* Check that we don't span multiple blocks - this breaks the
+ address comparisons below. */
+ if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
+ != (end - 1) - start) {
+ abort();
+ }
+
for(env = first_cpu; env != NULL; env = env->next_cpu) {
for(i = 0; i < CPU_TLB_SIZE; i++)
tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
#if (NB_MMU_MODES >= 3)
for(i = 0; i < CPU_TLB_SIZE; i++)
tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
for(i = 0; i < CPU_TLB_SIZE; i++)
tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
+#if (NB_MMU_MODES >= 5)
+ for(i = 0; i < CPU_TLB_SIZE; i++)
+ tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
#endif
}
}
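+
+/* Example (a sketch; vga_ram_offset, vram_size and the display loop are
+   hypothetical): a display adapter clears the VGA dirty bits after redrawing,
+   keeping start and end inside its own RAM block as required above:
+
+       cpu_physical_memory_reset_dirty(vga_ram_offset,
+                                       vga_ram_offset + vram_size,
+                                       VGA_DIRTY_FLAG);
+ */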
return in_migration;
}
+void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
+{
+ if (kvm_enabled())
+ kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+}
+
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
ram_addr_t ram_addr;
+ void *p;
if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
- ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
- tlb_entry->addend - (unsigned long)phys_ram_base;
+ p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
+ + tlb_entry->addend);
+ ram_addr = qemu_ram_addr_from_host(p);
if (!cpu_physical_memory_is_dirty(ram_addr)) {
tlb_entry->addr_write |= TLB_NOTDIRTY;
}
#if (NB_MMU_MODES >= 3)
for(i = 0; i < CPU_TLB_SIZE; i++)
tlb_update_dirty(&env->tlb_table[2][i]);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
for(i = 0; i < CPU_TLB_SIZE; i++)
tlb_update_dirty(&env->tlb_table[3][i]);
#endif
+#if (NB_MMU_MODES >= 5)
+ for(i = 0; i < CPU_TLB_SIZE; i++)
+ tlb_update_dirty(&env->tlb_table[4][i]);
#endif
}
tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
-#if (NB_MMU_MODES == 4)
+#endif
+#if (NB_MMU_MODES >= 4)
tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
+#if (NB_MMU_MODES >= 5)
+ tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
#endif
}
target_phys_addr_t addend;
int ret;
CPUTLBEntry *te;
- int i;
+ CPUWatchpoint *wp;
target_phys_addr_t iotlb;
p = phys_page_find(paddr >> TARGET_PAGE_BITS);
/* IO memory case (romd handled later) */
address |= TLB_MMIO;
}
- addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
+ addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
/* Normal RAM. */
iotlb = pd & TARGET_PAGE_MASK;
and avoid full address decoding in every device.
We can't use the high bits of pd for this because
IO_MEM_ROMD uses these as a ram address. */
- iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
+ iotlb = (pd & ~TARGET_PAGE_MASK);
+ if (p) {
+ iotlb += p->region_offset;
+ } else {
+ iotlb += paddr;
+ }
}
code_address = address;
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
+ TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
iotlb = io_mem_watch + paddr;
/* TODO: The memory case can be optimized by not trapping
reads of pages with a write breakpoint. */
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)
+
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
- ram_addr_t memory);
+ ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
- ram_addr_t orig_memory);
+ ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
need_subpage) \
do { \
/* register physical memory. 'size' must be a multiple of the target
page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
- io memory page */
-void cpu_register_physical_memory(target_phys_addr_t start_addr,
- ram_addr_t size,
- ram_addr_t phys_offset)
+ io memory page. The address used when calling the IO function is
+ the offset from the start of the region, plus region_offset. Both
+   start_addr and region_offset are rounded down to a page boundary
+ before calculating this offset. This should not be a problem unless
+ the low bits of start_addr and region_offset differ. */
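+/* Example (a sketch; my_read, my_write, s and the addresses are hypothetical):
+   a device whose second register page lives in a separate guest-physical
+   window can keep its handlers addressed from offset 0x1000:
+
+       int io = cpu_register_io_memory(0, my_read, my_write, s);
+       cpu_register_physical_memory_offset(0xf0002000, TARGET_PAGE_SIZE,
+                                           io, 0x1000);
+
+   my_read/my_write then see addresses starting at 0x1000 (the region_offset)
+   rather than the absolute physical address. */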
+void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
+ ram_addr_t size,
+ ram_addr_t phys_offset,
+ ram_addr_t region_offset)
{
target_phys_addr_t addr, end_addr;
PhysPageDesc *p;
if (kvm_enabled())
kvm_set_phys_mem(start_addr, size, phys_offset);
+ if (phys_offset == IO_MEM_UNASSIGNED) {
+ region_offset = start_addr;
+ }
+ region_offset &= TARGET_PAGE_MASK;
size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
end_addr = start_addr + (target_phys_addr_t)size;
for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
if (!(orig_memory & IO_MEM_SUBPAGE)) {
subpage = subpage_init((addr & TARGET_PAGE_MASK),
- &p->phys_offset, orig_memory);
+ &p->phys_offset, orig_memory,
+ p->region_offset);
} else {
subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
>> IO_MEM_SHIFT];
}
- subpage_register(subpage, start_addr2, end_addr2, phys_offset);
+ subpage_register(subpage, start_addr2, end_addr2, phys_offset,
+ region_offset);
+ p->region_offset = 0;
} else {
p->phys_offset = phys_offset;
if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
} else {
p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
p->phys_offset = phys_offset;
+ p->region_offset = region_offset;
if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
- (phys_offset & IO_MEM_ROMD))
+ (phys_offset & IO_MEM_ROMD)) {
phys_offset += TARGET_PAGE_SIZE;
- else {
+ } else {
target_phys_addr_t start_addr2, end_addr2;
int need_subpage = 0;
if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
subpage = subpage_init((addr & TARGET_PAGE_MASK),
- &p->phys_offset, IO_MEM_UNASSIGNED);
+ &p->phys_offset, IO_MEM_UNASSIGNED,
+ addr & TARGET_PAGE_MASK);
subpage_register(subpage, start_addr2, end_addr2,
- phys_offset);
+ phys_offset, region_offset);
+ p->region_offset = 0;
}
}
}
+ region_offset += TARGET_PAGE_SIZE;
}
/* since each CPU stores ram addresses in its TLB cache, we must
return p->phys_offset;
}
+void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
+{
+ if (kvm_enabled())
+ kvm_coalesce_mmio_region(addr, size);
+}
+
+void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
+{
+ if (kvm_enabled())
+ kvm_uncoalesce_mmio_region(addr, size);
+}
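+/* Usage sketch (illustrative; fb_base and fb_size are assumptions): a device
+   with write-mostly MMIO, e.g. a framebuffer, lets KVM batch guest writes
+   while it is mapped and undoes that when it is unmapped:
+
+       qemu_register_coalesced_mmio(fb_base, fb_size);
+       ...
+       qemu_unregister_coalesced_mmio(fb_base, fb_size);
+ */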
+
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
{
}
+/* Return a host pointer to ram allocated with qemu_ram_alloc.
+ With the exception of the softmmu code in this file, this should
+ only be used for local memory (e.g. video ram) that the device owns,
+ and knows it isn't going to access beyond the end of the block.
+
+ It should not be used for general purpose DMA.
+ Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
+ */
+void *qemu_get_ram_ptr(ram_addr_t addr)
+{
+ return phys_ram_base + addr;
+}
+
+/* Some of the softmmu routines need to translate from a host pointer
+ (typically a TLB entry) back to a ram offset. */
+ram_addr_t qemu_ram_addr_from_host(void *ptr)
+{
+ return (uint8_t *)ptr - phys_ram_base;
+}
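+/* Example (a sketch; the size is arbitrary): for any address inside a block
+   returned by qemu_ram_alloc, the two helpers above are inverses:
+
+       ram_addr_t base = qemu_ram_alloc(0x100000);
+       uint8_t *host = qemu_get_ram_ptr(base);
+       assert(qemu_ram_addr_from_host(host) == base);
+ */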
+
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+#if defined(TARGET_SPARC)
do_unassigned_access(addr, 0, 0, 0, 1);
#endif
return 0;
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+#if defined(TARGET_SPARC)
do_unassigned_access(addr, 0, 0, 0, 2);
#endif
return 0;
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+#if defined(TARGET_SPARC)
do_unassigned_access(addr, 0, 0, 0, 4);
#endif
return 0;
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+#if defined(TARGET_SPARC)
do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+#if defined(TARGET_SPARC)
do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
+#if defined(TARGET_SPARC)
do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
- stb_p(phys_ram_base + ram_addr, val);
+ stb_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
- stw_p(phys_ram_base + ram_addr, val);
+ stw_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
- stl_p(phys_ram_base + ram_addr, val);
+ stl_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef USE_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
};
/* Generate a debug exception if a watchpoint has been hit. */
-static void check_watchpoint(int offset, int flags)
+static void check_watchpoint(int offset, int len_mask, int flags)
{
CPUState *env = cpu_single_env;
+ target_ulong pc, cs_base;
+ TranslationBlock *tb;
target_ulong vaddr;
- int i;
-
+ CPUWatchpoint *wp;
+ int cpu_flags;
+
+ if (env->watchpoint_hit) {
+ /* We re-entered the check after replacing the TB. Now raise
+ * the debug interrupt so that is will trigger after the
+         * the debug interrupt so that it will trigger after the
+ cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
+ return;
+ }
vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- for (i = 0; i < env->nb_watchpoints; i++) {
- if (vaddr == env->watchpoint[i].vaddr
- && (env->watchpoint[i].type & flags)) {
- env->watchpoint_hit = i + 1;
- cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
- break;
+ TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ if ((vaddr == (wp->vaddr & len_mask) ||
+ (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
+ wp->flags |= BP_WATCHPOINT_HIT;
+ if (!env->watchpoint_hit) {
+ env->watchpoint_hit = wp;
+ tb = tb_find_pc(env->mem_io_pc);
+ if (!tb) {
+ cpu_abort(env, "check_watchpoint: could not find TB for "
+ "pc=%p", (void *)env->mem_io_pc);
+ }
+ cpu_restore_state(tb, env, env->mem_io_pc, NULL);
+ tb_phys_invalidate(tb, -1);
+ if (wp->flags & BP_STOP_BEFORE_ACCESS) {
+ env->exception_index = EXCP_DEBUG;
+ } else {
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
+ tb_gen_code(env, pc, cs_base, cpu_flags, 1);
+ }
+ cpu_resume_from_signal(env, NULL);
+ }
+ } else {
+ wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
}
phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
return ldub_phys(addr);
}
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
return lduw_phys(addr);
}
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
return ldl_phys(addr);
}
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
stb_phys(addr, val);
}
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
stw_phys(addr, val);
}
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
uint32_t val)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
stl_phys(addr, val);
}
uint32_t ret;
unsigned int idx;
- idx = SUBPAGE_IDX(addr - mmio->base);
+ idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
mmio, len, addr, idx);
#endif
- ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
+ ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
+ addr + mmio->region_offset[idx][0][len]);
return ret;
}
{
unsigned int idx;
- idx = SUBPAGE_IDX(addr - mmio->base);
+ idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
mmio, len, addr, idx, value);
#endif
- (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
+ (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
+ addr + mmio->region_offset[idx][1][len],
+ value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
- ram_addr_t memory)
+ ram_addr_t memory, ram_addr_t region_offset)
{
int idx, eidx;
unsigned int i;
if (io_mem_read[memory][i]) {
mmio->mem_read[idx][i] = &io_mem_read[memory][i];
mmio->opaque[idx][0][i] = io_mem_opaque[memory];
+ mmio->region_offset[idx][0][i] = region_offset;
}
if (io_mem_write[memory][i]) {
mmio->mem_write[idx][i] = &io_mem_write[memory][i];
mmio->opaque[idx][1][i] = io_mem_opaque[memory];
+ mmio->region_offset[idx][1][i] = region_offset;
}
}
}
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
- ram_addr_t orig_memory)
+ ram_addr_t orig_memory, ram_addr_t region_offset)
{
subpage_t *mmio;
int subpage_memory;
mmio = qemu_mallocz(sizeof(subpage_t));
- if (mmio != NULL) {
- mmio->base = base;
- subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
+
+ mmio->base = base;
+ subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
- printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
- mmio, base, TARGET_PAGE_SIZE, subpage_memory);
+ printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
+ mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
- *phys = subpage_memory | IO_MEM_SUBPAGE;
- subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
- }
+ *phys = subpage_memory | IO_MEM_SUBPAGE;
+ subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
+ region_offset);
return mmio;
}
+static int get_free_io_mem_idx(void)
+{
+ int i;
+
+ for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
+ if (!io_mem_used[i]) {
+ io_mem_used[i] = 1;
+ return i;
+ }
+
+ return -1;
+}
+
static void io_mem_init(void)
{
+ int i;
+
cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
- io_mem_nb = 5;
+ for (i=0; i<5; i++)
+ io_mem_used[i] = 1;
io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
watch_mem_write, NULL);
int i, subwidth = 0;
if (io_index <= 0) {
- if (io_mem_nb >= IO_MEM_NB_ENTRIES)
- return -1;
- io_index = io_mem_nb++;
+ io_index = get_free_io_mem_idx();
+ if (io_index == -1)
+ return io_index;
} else {
if (io_index >= IO_MEM_NB_ENTRIES)
return -1;
return (io_index << IO_MEM_SHIFT) | subwidth;
}
+void cpu_unregister_io_memory(int io_table_address)
+{
+ int i;
+ int io_index = io_table_address >> IO_MEM_SHIFT;
+
+ for (i=0;i < 3; i++) {
+ io_mem_read[io_index][i] = unassigned_mem_read[i];
+ io_mem_write[io_index][i] = unassigned_mem_write[i];
+ }
+ io_mem_opaque[io_index] = NULL;
+ io_mem_used[io_index] = 0;
+}
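+/* Usage sketch (illustrative; the handler tables and opaque pointer are
+   assumptions): with the free-slot tracking above, a hot-unplugged device
+   can hand its io_mem table entry back for reuse:
+
+       int io = cpu_register_io_memory(0, my_read, my_write, s);
+       ...
+       cpu_unregister_io_memory(io);
+ */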
+
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
return io_mem_write[io_index >> IO_MEM_SHIFT];
if (is_write) {
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+ target_phys_addr_t addr1 = addr;
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (p)
+ addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
/* XXX: could force cpu_single_env to NULL to avoid
potential bugs */
- if (l >= 4 && ((addr & 3) == 0)) {
+ if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit write access */
val = ldl_p(buf);
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+ io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
l = 4;
- } else if (l >= 2 && ((addr & 1) == 0)) {
+ } else if (l >= 2 && ((addr1 & 1) == 0)) {
/* 16 bit write access */
val = lduw_p(buf);
- io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
+ io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
l = 2;
} else {
/* 8 bit write access */
val = ldub_p(buf);
- io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
+ io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
l = 1;
}
} else {
unsigned long addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
/* RAM case */
- ptr = phys_ram_base + addr1;
+ ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
} else {
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
!(pd & IO_MEM_ROMD)) {
+ target_phys_addr_t addr1 = addr;
/* I/O case */
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (l >= 4 && ((addr & 3) == 0)) {
+ if (p)
+ addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit read access */
- val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+ val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
stl_p(buf, val);
l = 4;
- } else if (l >= 2 && ((addr & 1) == 0)) {
+ } else if (l >= 2 && ((addr1 & 1) == 0)) {
/* 16 bit read access */
- val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+ val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
stw_p(buf, val);
l = 2;
} else {
/* 8 bit read access */
- val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
+ val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
stb_p(buf, val);
l = 1;
}
} else {
/* RAM case */
- ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+ ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
(addr & ~TARGET_PAGE_MASK);
memcpy(buf, ptr, l);
}
unsigned long addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
/* ROM/RAM case */
- ptr = phys_ram_base + addr1;
+ ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
}
len -= l;
}
}
+typedef struct {
+ void *buffer;
+ target_phys_addr_t addr;
+ target_phys_addr_t len;
+} BounceBuffer;
+
+static BounceBuffer bounce;
+
+typedef struct MapClient {
+ void *opaque;
+ void (*callback)(void *opaque);
+ LIST_ENTRY(MapClient) link;
+} MapClient;
+
+static LIST_HEAD(map_client_list, MapClient) map_client_list
+ = LIST_HEAD_INITIALIZER(map_client_list);
+
+void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+{
+ MapClient *client = qemu_malloc(sizeof(*client));
+
+ client->opaque = opaque;
+ client->callback = callback;
+ LIST_INSERT_HEAD(&map_client_list, client, link);
+ return client;
+}
+
+void cpu_unregister_map_client(void *_client)
+{
+ MapClient *client = (MapClient *)_client;
+
+ LIST_REMOVE(client, link);
+}
+
+static void cpu_notify_map_clients(void)
+{
+ MapClient *client;
+
+ while (!LIST_EMPTY(&map_client_list)) {
+ client = LIST_FIRST(&map_client_list);
+ client->callback(client->opaque);
+ LIST_REMOVE(client, link);
+ }
+}
+
+/* Map a physical memory region into a host virtual address.
+ * May map a subset of the requested range, given by and returned in *plen.
+ * May return NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
+ */
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+ target_phys_addr_t *plen,
+ int is_write)
+{
+ target_phys_addr_t len = *plen;
+ target_phys_addr_t done = 0;
+ int l;
+ uint8_t *ret = NULL;
+ uint8_t *ptr;
+ target_phys_addr_t page;
+ unsigned long pd;
+ PhysPageDesc *p;
+ unsigned long addr1;
+
+ while (len > 0) {
+ page = addr & TARGET_PAGE_MASK;
+ l = (page + TARGET_PAGE_SIZE) - addr;
+ if (l > len)
+ l = len;
+ p = phys_page_find(page >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+ if (done || bounce.buffer) {
+ break;
+ }
+ bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
+ bounce.addr = addr;
+ bounce.len = l;
+ if (!is_write) {
+ cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
+ }
+ ptr = bounce.buffer;
+ } else {
+ addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ ptr = qemu_get_ram_ptr(addr1);
+ }
+ if (!done) {
+ ret = ptr;
+ } else if (ret + done != ptr) {
+ break;
+ }
+
+ len -= l;
+ addr += l;
+ done += l;
+ }
+ *plen = done;
+ return ret;
+}
+
+/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
+ * Will also mark the memory as dirty if is_write == 1. access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ */
+void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
+ int is_write, target_phys_addr_t access_len)
+{
+ if (buffer != bounce.buffer) {
+ if (is_write) {
+ ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
+ while (access_len) {
+ unsigned l;
+ l = TARGET_PAGE_SIZE;
+ if (l > access_len)
+ l = access_len;
+ if (!cpu_physical_memory_is_dirty(addr1)) {
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
+ /* set dirty bit */
+ phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
+ (0xff & ~CODE_DIRTY_FLAG);
+ }
+ addr1 += l;
+ access_len -= l;
+ }
+ }
+ return;
+ }
+ if (is_write) {
+ cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
+ }
+ qemu_free(bounce.buffer);
+ bounce.buffer = NULL;
+ cpu_notify_map_clients();
+}
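+
+/* Usage sketch (illustrative; s, my_dma_retry_cb and the transfer length are
+   assumptions): a DMA-capable device maps the guest buffer, falls back to a
+   map-client callback when the single bounce buffer is busy, and unmaps with
+   the length it actually transferred:
+
+       target_phys_addr_t len = size;
+       void *mem = cpu_physical_memory_map(addr, &len, is_write);
+       if (!mem) {
+           cpu_register_map_client(s, my_dma_retry_cb);
+           return;
+       }
+       ... transfer up to len bytes through mem ...
+       cpu_physical_memory_unmap(mem, len, is_write, len);
+ */
+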
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
!(pd & IO_MEM_ROMD)) {
/* I/O case */
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (p)
+ addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
} else {
/* RAM case */
- ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+ ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
(addr & ~TARGET_PAGE_MASK);
val = ldl_p(ptr);
}
!(pd & IO_MEM_ROMD)) {
/* I/O case */
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (p)
+ addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#endif
} else {
/* RAM case */
- ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+ ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
(addr & ~TARGET_PAGE_MASK);
val = ldq_p(ptr);
}
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (p)
+ addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
} else {
unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
- ptr = phys_ram_base + addr1;
+ ptr = qemu_get_ram_ptr(addr1);
stl_p(ptr, val);
if (unlikely(in_migration)) {
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (p)
+ addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
} else {
- ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
+ ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
(addr & ~TARGET_PAGE_MASK);
stq_p(ptr, val);
}
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (p)
+ addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
} else {
unsigned long addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
/* RAM case */
- ptr = phys_ram_base + addr1;
+ ptr = qemu_get_ram_ptr(addr1);
stl_p(ptr, val);
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
#endif
-/* virtual memory access for debug */
+/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
uint8_t *buf, int len, int is_write)
{
l = (page + TARGET_PAGE_SIZE) - addr;
if (l > len)
l = len;
- cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
- buf, l, is_write);
+ phys_addr += (addr & ~TARGET_PAGE_MASK);
+#if !defined(CONFIG_USER_ONLY)
+ if (is_write)
+ cpu_physical_memory_write_rom(phys_addr, buf, l);
+ else
+#endif
+ cpu_physical_memory_rw(phys_addr, buf, l, is_write);
len -= l;
buf += l;
addr += l;