diff --git a/exec.c b/exec.c index 0c86bced84..bb6aa4a070 100644 --- a/exec.c +++ b/exec.c @@ -57,17 +57,17 @@ #include "trace.h" #endif +#include "cputlb.h" + #define WANT_EXEC_OBSOLETE #include "exec-obsolete.h" //#define DEBUG_TB_INVALIDATE //#define DEBUG_FLUSH -//#define DEBUG_TLB //#define DEBUG_UNASSIGNED /* make various TB consistency checks */ //#define DEBUG_TB_CHECK -//#define DEBUG_TLB_CHECK //#define DEBUG_IOPORT //#define DEBUG_SUBPAGE @@ -86,15 +86,14 @@ static int nb_tbs; /* any access to the tbs or the page table must use this lock */ spinlock_t tb_lock = SPIN_LOCK_UNLOCKED; -#if defined(__arm__) || defined(__sparc_v9__) +#if defined(__arm__) || defined(__sparc__) /* The prologue must be reachable with a direct jump. ARM and Sparc64 have limited branch ranges (possibly also PPC) so place it in a section close to code segment. */ #define code_gen_section \ __attribute__((__section__(".gen_code"))) \ __attribute__((aligned (32))) -#elif defined(_WIN32) -/* Maximum alignment for Win32 is 16. */ +#elif defined(_WIN32) && !defined(_WIN64) #define code_gen_section \ __attribute__((aligned (16))) #else @@ -123,10 +122,10 @@ static MemoryRegion io_mem_subpage_ram; #endif -CPUState *first_cpu; +CPUArchState *first_cpu; /* current CPU in the current thread. It is only valid inside cpu_exec() */ -DEFINE_TLS(CPUState *,cpu_single_env); +DEFINE_TLS(CPUArchState *,cpu_single_env); /* 0 = Do not count executed instructions. 1 = Precise instruction counting. 2 = Adaptive rate instruction counting. */ @@ -177,9 +176,9 @@ typedef struct PageDesc { #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) -unsigned long qemu_real_host_page_size; -unsigned long qemu_host_page_size; -unsigned long qemu_host_page_mask; +uintptr_t qemu_real_host_page_size; +uintptr_t qemu_host_page_size; +uintptr_t qemu_host_page_mask; /* This is a multi-level map on the virtual address space. The bottom level has pointers to PageDesc.
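
The long -> uintptr_t conversions that run through this patch matter on LLP64 hosts such as Win64, where long is 32 bits but pointers are 64 bits, so round-tripping a pointer through long truncates it. Below is a minimal standalone sketch of the tagged-pointer idiom those casts serve (the low two bits of an aligned TranslationBlock pointer carry a list index); the helper names are illustrative, not QEMU's.

    #include <assert.h>
    #include <stdint.h>

    typedef struct TranslationBlock TranslationBlock;

    /* Pack a 2-bit index into the low bits of a 4-byte-aligned pointer. */
    static inline TranslationBlock *tb_tag(TranslationBlock *tb, unsigned n)
    {
        assert(n < 4 && ((uintptr_t)tb & 3) == 0);
        return (TranslationBlock *)((uintptr_t)tb | n);
    }

    /* Recover pointer and index. Casting via 'long' here would truncate
     * on LLP64 hosts, which is exactly the bug class this patch removes. */
    static inline TranslationBlock *tb_untag(TranslationBlock *tb, unsigned *n)
    {
        *n = (uintptr_t)tb & 3;
        return (TranslationBlock *)((uintptr_t)tb & ~(uintptr_t)3);
    }
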
*/ @@ -217,20 +216,7 @@ static void memory_map_init(void); static MemoryRegion io_mem_watch; #endif -/* log support */ -#ifdef WIN32 -static const char *logfilename = "qemu.log"; -#else -static const char *logfilename = "/tmp/qemu.log"; -#endif -FILE *logfile; -int loglevel; -static int log_append = 0; - /* statistics */ -#if !defined(CONFIG_USER_ONLY) -static int tlb_flush_count; -#endif static int tb_flush_count; static int tb_phys_invalidate_count; @@ -480,7 +466,7 @@ static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb, phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); } -static MemoryRegionSection *phys_page_find(target_phys_addr_t index) +MemoryRegionSection *phys_page_find(target_phys_addr_t index) { PhysPageEntry lp = phys_map; PhysPageEntry *p; @@ -500,17 +486,13 @@ not_found: return &phys_sections[s_index]; } -static target_phys_addr_t section_addr(MemoryRegionSection *section, - target_phys_addr_t addr) +bool memory_region_is_unassigned(MemoryRegion *mr) { - addr -= section->offset_within_address_space; - addr += section->offset_within_region; - return addr; + return mr != &io_mem_ram && mr != &io_mem_rom + && mr != &io_mem_notdirty && !mr->rom_device + && mr != &io_mem_watch; } -static void tlb_protect_code(ram_addr_t ram_addr); -static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, - target_ulong vaddr); #define mmap_lock() do { } while(0) #define mmap_unlock() do { } while(0) #endif @@ -559,10 +541,9 @@ static void code_gen_alloc(unsigned long tb_size) /* Cannot map more than that */ if (code_gen_buffer_size > (800 * 1024 * 1024)) code_gen_buffer_size = (800 * 1024 * 1024); -#elif defined(__sparc_v9__) +#elif defined(__sparc__) && HOST_LONG_BITS == 64 // Map the buffer below 2G, so we can use direct calls and branches - flags |= MAP_FIXED; - start = (void *) 0x60000000UL; + start = (void *) 0x40000000UL; if (code_gen_buffer_size > (512 * 1024 * 1024)) code_gen_buffer_size = (512 * 1024 * 1024); #elif defined(__arm__) @@ -600,10 +581,9 @@ static void code_gen_alloc(unsigned long tb_size) /* Cannot map more than that */ if (code_gen_buffer_size > (800 * 1024 * 1024)) code_gen_buffer_size = (800 * 1024 * 1024); -#elif defined(__sparc_v9__) +#elif defined(__sparc__) && HOST_LONG_BITS == 64 // Map the buffer below 2G, so we can use direct calls and branches - flags |= MAP_FIXED; - addr = (void *) 0x60000000UL; + addr = (void *) 0x40000000UL; if (code_gen_buffer_size > (512 * 1024 * 1024)) { code_gen_buffer_size = (512 * 1024 * 1024); } @@ -636,6 +616,7 @@ void tcg_exec_init(unsigned long tb_size) cpu_gen_init(); code_gen_alloc(tb_size); code_gen_ptr = code_gen_buffer; + tcg_register_jit(code_gen_buffer, code_gen_buffer_size); page_init(); #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE) /* There's no guest base to take into account, so go ahead and @@ -661,7 +642,7 @@ void cpu_exec_init_all(void) static int cpu_common_post_load(void *opaque, int version_id) { - CPUState *env = opaque; + CPUArchState *env = opaque; /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the version_id is increased. 
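
Besides widening __sparc_v9__ to __sparc__ (with a 64-bit check), the hunks above drop MAP_FIXED for the code-generation buffer and pass the low address only as a hint, so an existing mapping can no longer be silently replaced. A minimal sketch of that placement strategy, assuming a POSIX host; the function and size handling are illustrative, only the 0x40000000 hint mirrors the patch.

    #include <stddef.h>
    #include <sys/mman.h>

    /* Ask for a buffer near 1GB so generated code can reach the prologue
     * with direct calls/branches. Without MAP_FIXED the kernel may place
     * it elsewhere, which is safe; MAP_FIXED would clobber whatever
     * mapping already lived at the hint address. */
    static void *alloc_code_buffer(size_t size)
    {
        void *hint = (void *)0x40000000UL;
        void *buf = mmap(hint, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return buf == MAP_FAILED ? NULL : buf;
    }
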
*/ @@ -678,16 +659,16 @@ static const VMStateDescription vmstate_cpu_common = { .minimum_version_id_old = 1, .post_load = cpu_common_post_load, .fields = (VMStateField []) { - VMSTATE_UINT32(halted, CPUState), - VMSTATE_UINT32(interrupt_request, CPUState), + VMSTATE_UINT32(halted, CPUArchState), + VMSTATE_UINT32(interrupt_request, CPUArchState), VMSTATE_END_OF_LIST() } }; #endif -CPUState *qemu_get_cpu(int cpu) +CPUArchState *qemu_get_cpu(int cpu) { - CPUState *env = first_cpu; + CPUArchState *env = first_cpu; while (env) { if (env->cpu_index == cpu) @@ -698,9 +679,9 @@ CPUState *qemu_get_cpu(int cpu) return env; } -void cpu_exec_init(CPUState *env) +void cpu_exec_init(CPUArchState *env) { - CPUState **penv; + CPUArchState **penv; int cpu_index; #if defined(CONFIG_USER_ONLY) @@ -799,9 +780,9 @@ static void page_flush_tb(void) /* flush all the translation blocks */ /* XXX: tb_flush is currently not thread safe */ -void tb_flush(CPUState *env1) +void tb_flush(CPUArchState *env1) { - CPUState *env; + CPUArchState *env; #if defined(DEBUG_FLUSH) printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", (unsigned long)(code_gen_ptr - code_gen_buffer), @@ -887,8 +868,8 @@ static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) for(;;) { tb1 = *ptb; - n1 = (long)tb1 & 3; - tb1 = (TranslationBlock *)((long)tb1 & ~3); + n1 = (uintptr_t)tb1 & 3; + tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); if (tb1 == tb) { *ptb = tb1->page_next[n1]; break; @@ -908,8 +889,8 @@ static inline void tb_jmp_remove(TranslationBlock *tb, int n) /* find tb(n) in circular list */ for(;;) { tb1 = *ptb; - n1 = (long)tb1 & 3; - tb1 = (TranslationBlock *)((long)tb1 & ~3); + n1 = (uintptr_t)tb1 & 3; + tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); if (n1 == n && tb1 == tb) break; if (n1 == 2) { @@ -929,12 +910,12 @@ static inline void tb_jmp_remove(TranslationBlock *tb, int n) another TB */ static inline void tb_reset_jump(TranslationBlock *tb, int n) { - tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n])); + tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n])); } void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) { - CPUState *env; + CPUArchState *env; PageDesc *p; unsigned int h, n1; tb_page_addr_t phys_pc; @@ -974,16 +955,16 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) /* suppress any remaining jumps to this TB */ tb1 = tb->jmp_first; for(;;) { - n1 = (long)tb1 & 3; + n1 = (uintptr_t)tb1 & 3; if (n1 == 2) break; - tb1 = (TranslationBlock *)((long)tb1 & ~3); + tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); tb2 = tb1->jmp_next[n1]; tb_reset_jump(tb1, n1); tb1->jmp_next[n1] = NULL; tb1 = tb2; } - tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */ + tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */ tb_phys_invalidate_count++; } @@ -1024,8 +1005,8 @@ static void build_page_bitmap(PageDesc *p) tb = p->first_tb; while (tb != NULL) { - n = (long)tb & 3; - tb = (TranslationBlock *)((long)tb & ~3); + n = (uintptr_t)tb & 3; + tb = (TranslationBlock *)((uintptr_t)tb & ~3); /* NOTE: this is subtle as a TB may span two physical pages */ if (n == 0) { /* NOTE: tb_end may be after the end of the page, but @@ -1043,7 +1024,7 @@ static void build_page_bitmap(PageDesc *p) } } -TranslationBlock *tb_gen_code(CPUState *env, +TranslationBlock *tb_gen_code(CPUArchState *env, target_ulong pc, target_ulong cs_base, int flags, int cflags) { @@ -1069,7 +1050,8 @@ TranslationBlock 
*tb_gen_code(CPUState *env, tb->flags = flags; tb->cflags = cflags; cpu_gen_code(env, tb, &code_gen_size); - code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); + code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size + + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); /* check next page if needed */ virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; @@ -1081,16 +1063,35 @@ TranslationBlock *tb_gen_code(CPUState *env, return tb; } -/* invalidate all TBs which intersect with the target physical page - starting in range [start;end[. NOTE: start and end must refer to - the same physical page. 'is_cpu_write_access' should be true if called - from a real cpu write access: the virtual CPU will exit the current - TB if code is modified inside this TB. */ +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end may refer to *different* physical pages. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + */ +void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end, + int is_cpu_write_access) +{ + while (start < end) { + tb_invalidate_phys_page_range(start, end, is_cpu_write_access); + start &= TARGET_PAGE_MASK; + start += TARGET_PAGE_SIZE; + } +} + +/* + * Invalidate all TBs which intersect with the target physical address range + * [start;end[. NOTE: start and end must refer to the *same* physical page. + * 'is_cpu_write_access' should be true if called from a real cpu write + * access: the virtual CPU will exit the current TB if code is modified inside + * this TB. + */ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, int is_cpu_write_access) { TranslationBlock *tb, *tb_next, *saved_tb; - CPUState *env = cpu_single_env; + CPUArchState *env = cpu_single_env; tb_page_addr_t tb_start, tb_end; PageDesc *p; int n; @@ -1117,8 +1118,8 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, /* XXX: see if in some cases it could be faster to invalidate all the code */ tb = p->first_tb; while (tb != NULL) { - n = (long)tb & 3; - tb = (TranslationBlock *)((long)tb & ~3); + n = (uintptr_t)tb & 3; + tb = (TranslationBlock *)((uintptr_t)tb & ~3); tb_next = tb->page_next[n]; /* NOTE: this is subtle as a TB may span two physical pages */ if (n == 0) { @@ -1201,7 +1202,8 @@ static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", cpu_single_env->mem_io_vaddr, len, cpu_single_env->eip, - cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base); + cpu_single_env->eip + + (intptr_t)cpu_single_env->segs[R_CS].base); } #endif p = page_find(start >> TARGET_PAGE_BITS); @@ -1220,14 +1222,14 @@ static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) #if !defined(CONFIG_SOFTMMU) static void tb_invalidate_phys_page(tb_page_addr_t addr, - unsigned long pc, void *puc) + uintptr_t pc, void *puc) { TranslationBlock *tb; PageDesc *p; int n; #ifdef TARGET_HAS_PRECISE_SMC TranslationBlock *current_tb = NULL; - CPUState *env = cpu_single_env; + CPUArchState *env = cpu_single_env; int current_tb_modified = 0; target_ulong current_pc = 0; target_ulong current_cs_base = 0; @@ -1245,8 +1247,8 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr, } #endif while (tb != NULL) { - n = (long)tb & 3; - tb = 
(TranslationBlock *)((long)tb & ~3); + n = (uintptr_t)tb & 3; + tb = (TranslationBlock *)((uintptr_t)tb & ~3); #ifdef TARGET_HAS_PRECISE_SMC if (current_tb == tb && (current_tb->cflags & CF_COUNT_MASK) != 1) { @@ -1294,7 +1296,7 @@ static inline void tb_alloc_page(TranslationBlock *tb, #ifndef CONFIG_USER_ONLY page_already_protected = p->first_tb != NULL; #endif - p->first_tb = (TranslationBlock *)((long)tb | n); + p->first_tb = (TranslationBlock *)((uintptr_t)tb | n); invalidate_page_bitmap(p); #if defined(TARGET_HAS_SMC) || 1 @@ -1361,7 +1363,7 @@ void tb_link_page(TranslationBlock *tb, else tb->page_addr[1] = -1; - tb->jmp_first = (TranslationBlock *)((long)tb | 2); + tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); tb->jmp_next[0] = NULL; tb->jmp_next[1] = NULL; @@ -1379,24 +1381,25 @@ void tb_link_page(TranslationBlock *tb, /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < tb[1].tc_ptr. Return NULL if not found */ -TranslationBlock *tb_find_pc(unsigned long tc_ptr) +TranslationBlock *tb_find_pc(uintptr_t tc_ptr) { int m_min, m_max, m; - unsigned long v; + uintptr_t v; TranslationBlock *tb; if (nb_tbs <= 0) return NULL; - if (tc_ptr < (unsigned long)code_gen_buffer || - tc_ptr >= (unsigned long)code_gen_ptr) + if (tc_ptr < (uintptr_t)code_gen_buffer || + tc_ptr >= (uintptr_t)code_gen_ptr) { return NULL; + } /* binary search (cf Knuth) */ m_min = 0; m_max = nb_tbs - 1; while (m_min <= m_max) { m = (m_min + m_max) >> 1; tb = &tbs[m]; - v = (unsigned long)tb->tc_ptr; + v = (uintptr_t)tb->tc_ptr; if (v == tc_ptr) return tb; else if (tc_ptr < v) { @@ -1419,8 +1422,8 @@ static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n) if (tb1 != NULL) { /* find head of list */ for(;;) { - n1 = (long)tb1 & 3; - tb1 = (TranslationBlock *)((long)tb1 & ~3); + n1 = (uintptr_t)tb1 & 3; + tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); if (n1 == 2) break; tb1 = tb1->jmp_next[n1]; @@ -1432,8 +1435,8 @@ static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n) ptb = &tb_next->jmp_first; for(;;) { tb1 = *ptb; - n1 = (long)tb1 & 3; - tb1 = (TranslationBlock *)((long)tb1 & ~3); + n1 = (uintptr_t)tb1 & 3; + tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); if (n1 == n && tb1 == tb) break; ptb = &tb1->jmp_next[n1]; @@ -1457,44 +1460,48 @@ static void tb_reset_jump_recursive(TranslationBlock *tb) #if defined(TARGET_HAS_ICE) #if defined(CONFIG_USER_ONLY) -static void breakpoint_invalidate(CPUState *env, target_ulong pc) +static void breakpoint_invalidate(CPUArchState *env, target_ulong pc) { tb_invalidate_phys_page_range(pc, pc + 1, 0); } #else -static void breakpoint_invalidate(CPUState *env, target_ulong pc) +void tb_invalidate_phys_addr(target_phys_addr_t addr) { - target_phys_addr_t addr; ram_addr_t ram_addr; MemoryRegionSection *section; - addr = cpu_get_phys_page_debug(env, pc); section = phys_page_find(addr >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || (section->mr->rom_device && section->mr->readable))) { return; } ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + section_addr(section, addr); + + memory_region_section_addr(section, addr); tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); } + +static void breakpoint_invalidate(CPUArchState *env, target_ulong pc) +{ + tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) | + (pc & ~TARGET_PAGE_MASK)); +} #endif #endif /* TARGET_HAS_ICE */ #if defined(CONFIG_USER_ONLY) -void cpu_watchpoint_remove_all(CPUState *env, int mask) +void 
cpu_watchpoint_remove_all(CPUArchState *env, int mask) { } -int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, +int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len, int flags, CPUWatchpoint **watchpoint) { return -ENOSYS; } #else /* Add a watchpoint. */ -int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, +int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len, int flags, CPUWatchpoint **watchpoint) { target_ulong len_mask = ~(len - 1); @@ -1527,7 +1534,7 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, } /* Remove a specific watchpoint. */ -int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len, +int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len, int flags) { target_ulong len_mask = ~(len - 1); @@ -1544,7 +1551,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len, } /* Remove a specific watchpoint by reference. */ -void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint) +void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint) { QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry); @@ -1554,7 +1561,7 @@ void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint) } /* Remove all matching watchpoints. */ -void cpu_watchpoint_remove_all(CPUState *env, int mask) +void cpu_watchpoint_remove_all(CPUArchState *env, int mask) { CPUWatchpoint *wp, *next; @@ -1566,7 +1573,7 @@ void cpu_watchpoint_remove_all(CPUState *env, int mask) #endif /* Add a breakpoint. */ -int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, +int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags, CPUBreakpoint **breakpoint) { #if defined(TARGET_HAS_ICE) @@ -1594,7 +1601,7 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, } /* Remove a specific breakpoint. */ -int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags) +int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags) { #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp; @@ -1612,7 +1619,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags) } /* Remove a specific breakpoint by reference. */ -void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint) +void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint) { #if defined(TARGET_HAS_ICE) QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry); @@ -1624,7 +1631,7 @@ void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint) } /* Remove all matching breakpoints. */ -void cpu_breakpoint_remove_all(CPUState *env, int mask) +void cpu_breakpoint_remove_all(CPUArchState *env, int mask) { #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp, *next; @@ -1638,7 +1645,7 @@ void cpu_breakpoint_remove_all(CPUState *env, int mask) /* enable or disable single step mode. EXCP_DEBUG is returned by the CPU loop after each instruction */ -void cpu_single_step(CPUState *env, int enabled) +void cpu_single_step(CPUArchState *env, int enabled) { #if defined(TARGET_HAS_ICE) if (env->singlestep_enabled != enabled) { @@ -1654,47 +1661,7 @@ void cpu_single_step(CPUState *env, int enabled) #endif } -/* enable or disable low levels log */ -void cpu_set_log(int log_flags) -{ - loglevel = log_flags; - if (loglevel && !logfile) { - logfile = fopen(logfilename, log_append ? 
"a" : "w"); - if (!logfile) { - perror(logfilename); - _exit(1); - } -#if !defined(CONFIG_SOFTMMU) - /* must avoid mmap() usage of glibc by setting a buffer "by hand" */ - { - static char logfile_buf[4096]; - setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf)); - } -#elif defined(_WIN32) - /* Win32 doesn't support line-buffering, so use unbuffered output. */ - setvbuf(logfile, NULL, _IONBF, 0); -#else - setvbuf(logfile, NULL, _IOLBF, 0); -#endif - log_append = 1; - } - if (!loglevel && logfile) { - fclose(logfile); - logfile = NULL; - } -} - -void cpu_set_log_filename(const char *filename) -{ - logfilename = strdup(filename); - if (logfile) { - fclose(logfile); - logfile = NULL; - } - cpu_set_log(loglevel); -} - -static void cpu_unlink_tb(CPUState *env) +static void cpu_unlink_tb(CPUArchState *env) { /* FIXME: TB unchaining isn't SMP safe. For now just ignore the problem and hope the cpu will stop of its own accord. For userspace @@ -1716,7 +1683,7 @@ static void cpu_unlink_tb(CPUState *env) #ifndef CONFIG_USER_ONLY /* mask must never be zero, except for A20 change call */ -static void tcg_handle_interrupt(CPUState *env, int mask) +static void tcg_handle_interrupt(CPUArchState *env, int mask) { int old_mask; @@ -1747,97 +1714,25 @@ CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt; #else /* CONFIG_USER_ONLY */ -void cpu_interrupt(CPUState *env, int mask) +void cpu_interrupt(CPUArchState *env, int mask) { env->interrupt_request |= mask; cpu_unlink_tb(env); } #endif /* CONFIG_USER_ONLY */ -void cpu_reset_interrupt(CPUState *env, int mask) +void cpu_reset_interrupt(CPUArchState *env, int mask) { env->interrupt_request &= ~mask; } -void cpu_exit(CPUState *env) +void cpu_exit(CPUArchState *env) { env->exit_request = 1; cpu_unlink_tb(env); } -const CPULogItem cpu_log_items[] = { - { CPU_LOG_TB_OUT_ASM, "out_asm", - "show generated host assembly code for each compiled TB" }, - { CPU_LOG_TB_IN_ASM, "in_asm", - "show target assembly code for each compiled TB" }, - { CPU_LOG_TB_OP, "op", - "show micro ops for each compiled TB" }, - { CPU_LOG_TB_OP_OPT, "op_opt", - "show micro ops " -#ifdef TARGET_I386 - "before eflags optimization and " -#endif - "after liveness analysis" }, - { CPU_LOG_INT, "int", - "show interrupts/exceptions in short format" }, - { CPU_LOG_EXEC, "exec", - "show trace before each executed TB (lots of logs)" }, - { CPU_LOG_TB_CPU, "cpu", - "show CPU state before block translation" }, -#ifdef TARGET_I386 - { CPU_LOG_PCALL, "pcall", - "show protected mode far calls/returns/exceptions" }, - { CPU_LOG_RESET, "cpu_reset", - "show CPU state before CPU resets" }, -#endif -#ifdef DEBUG_IOPORT - { CPU_LOG_IOPORT, "ioport", - "show all i/o ports accesses" }, -#endif - { 0, NULL, NULL }, -}; - -static int cmp1(const char *s1, int n, const char *s2) -{ - if (strlen(s2) != n) - return 0; - return memcmp(s1, s2, n) == 0; -} - -/* takes a comma separated list of log masks. Return 0 if error. */ -int cpu_str_to_log_mask(const char *str) -{ - const CPULogItem *item; - int mask; - const char *p, *p1; - - p = str; - mask = 0; - for(;;) { - p1 = strchr(p, ','); - if (!p1) - p1 = p + strlen(p); - if(cmp1(p,p1-p,"all")) { - for(item = cpu_log_items; item->mask != 0; item++) { - mask |= item->mask; - } - } else { - for(item = cpu_log_items; item->mask != 0; item++) { - if (cmp1(p, p1 - p, item->name)) - goto found; - } - return 0; - } - found: - mask |= item->mask; - if (*p1 != ',') - break; - p = p1 + 1; - } - return mask; -} - -void cpu_abort(CPUState *env, const char *fmt, ...) 
+void cpu_abort(CPUArchState *env, const char *fmt, ...) { va_list ap; va_list ap2; @@ -1877,17 +1772,17 @@ void cpu_abort(CPUState *env, const char *fmt, ...) abort(); } -CPUState *cpu_copy(CPUState *env) +CPUArchState *cpu_copy(CPUArchState *env) { - CPUState *new_env = cpu_init(env->cpu_model_str); - CPUState *next_cpu = new_env->next_cpu; + CPUArchState *new_env = cpu_init(env->cpu_model_str); + CPUArchState *next_cpu = new_env->next_cpu; int cpu_index = new_env->cpu_index; #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp; CPUWatchpoint *wp; #endif - memcpy(new_env, env, sizeof(CPUState)); + memcpy(new_env, env, sizeof(CPUArchState)); /* Preserve chaining and index. */ new_env->next_cpu = next_cpu; @@ -1912,8 +1807,7 @@ CPUState *cpu_copy(CPUState *env) } #if !defined(CONFIG_USER_ONLY) - -static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr) +void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr) { unsigned int i; @@ -1928,128 +1822,29 @@ static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr) TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); } -static CPUTLBEntry s_cputlb_empty_entry = { - .addr_read = -1, - .addr_write = -1, - .addr_code = -1, - .addend = -1, -}; - -/* NOTE: - * If flush_global is true (the usual case), flush all tlb entries. - * If flush_global is false, flush (at least) all tlb entries not - * marked global. - * - * Since QEMU doesn't currently implement a global/not-global flag - * for tlb entries, at the moment tlb_flush() will also flush all - * tlb entries in the flush_global == false case. This is OK because - * CPU architectures generally permit an implementation to drop - * entries from the TLB at any time, so flushing more entries than - * required is only an efficiency issue, not a correctness issue. - */ -void tlb_flush(CPUState *env, int flush_global) -{ - int i; - -#if defined(DEBUG_TLB) - printf("tlb_flush:\n"); -#endif - /* must reset current TB so that interrupts cannot modify the - links while we are modifying them */ - env->current_tb = NULL; - - for(i = 0; i < CPU_TLB_SIZE; i++) { - int mmu_idx; - for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { - env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry; - } - } - - memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); - - env->tlb_flush_addr = -1; - env->tlb_flush_mask = 0; - tlb_flush_count++; -} - -static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr) -{ - if (addr == (tlb_entry->addr_read & - (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || - addr == (tlb_entry->addr_write & - (TARGET_PAGE_MASK | TLB_INVALID_MASK)) || - addr == (tlb_entry->addr_code & - (TARGET_PAGE_MASK | TLB_INVALID_MASK))) { - *tlb_entry = s_cputlb_empty_entry; - } -} - -void tlb_flush_page(CPUState *env, target_ulong addr) +static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end, + uintptr_t length) { - int i; - int mmu_idx; + uintptr_t start1; -#if defined(DEBUG_TLB) - printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr); -#endif - /* Check if we need to flush due to large pages. 
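
tb_flush_jmp_cache() above works on a direct-mapped lookup table, so invalidating a page is nothing more than zeroing the slots its PCs can hash to, and a full flush is one memset. A toy version of such a cache; the sizes and hash are illustrative.

    #include <stdint.h>
    #include <string.h>

    #define JMP_CACHE_BITS 12
    #define JMP_CACHE_SIZE (1u << JMP_CACHE_BITS)

    static void *jmp_cache[JMP_CACHE_SIZE];

    /* Direct-mapped: one slot per hashed PC, overwritten on conflict. */
    static inline unsigned jmp_cache_hash(uint64_t pc)
    {
        return (unsigned)(pc >> 2) & (JMP_CACHE_SIZE - 1);
    }

    static void jmp_cache_insert(uint64_t pc, void *tb)
    {
        jmp_cache[jmp_cache_hash(pc)] = tb;
    }

    /* Cheap wholesale invalidation is the design point of the scheme. */
    static void jmp_cache_flush(void)
    {
        memset(jmp_cache, 0, sizeof(jmp_cache));
    }
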
*/ - if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { -#if defined(DEBUG_TLB) - printf("tlb_flush_page: forced full flush (" - TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", - env->tlb_flush_addr, env->tlb_flush_mask); -#endif - tlb_flush(env, 1); - return; + /* we modify the TLB cache so that the dirty bit will be set again + when accessing the range */ + start1 = (uintptr_t)qemu_safe_ram_ptr(start); + /* Check that we don't span multiple blocks - this breaks the + address comparisons below. */ + if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1 + != (end - 1) - start) { + abort(); } - /* must reset current TB so that interrupts cannot modify the - links while we are modifying them */ - env->current_tb = NULL; - - addr &= TARGET_PAGE_MASK; - i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) - tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); - - tlb_flush_jmp_cache(env, addr); -} - -/* update the TLBs so that writes to code in the virtual page 'addr' - can be detected */ -static void tlb_protect_code(ram_addr_t ram_addr) -{ - cpu_physical_memory_reset_dirty(ram_addr, - ram_addr + TARGET_PAGE_SIZE, - CODE_DIRTY_FLAG); -} - -/* update the TLB so that writes in physical page 'phys_addr' are no longer - tested for self modifying code */ -static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, - target_ulong vaddr) -{ - cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG); -} + cpu_tlb_reset_dirty_all(start1, length); -static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, - unsigned long start, unsigned long length) -{ - unsigned long addr; - if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) { - addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; - if ((addr - start) < length) { - tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY; - } - } } /* Note: start and end must be within the same ram block. */ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, int dirty_flags) { - CPUState *env; - unsigned long length, start1; - int i; + uintptr_t length; start &= TARGET_PAGE_MASK; end = TARGET_PAGE_ALIGN(end); @@ -2059,23 +1854,8 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, return; cpu_physical_memory_mask_dirty_range(start, length, dirty_flags); - /* we modify the TLB cache so that the dirty bit will be set again - when accessing the range */ - start1 = (unsigned long)qemu_safe_ram_ptr(start); - /* Check that we don't span multiple blocks - this breaks the - address comparisons below. 
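
cpu_physical_memory_reset_dirty() above pairs two steps: clear the per-page dirty flags for the range, then re-arm the write traps (via the TLB) so the flags get set again on the next store. A self-contained sketch of the bookkeeping half, assuming one flag byte per target page; the layout is illustrative, not QEMU's actual bitmap.

    #include <stdint.h>

    #define PAGE_BITS 12
    #define NPAGES    4096          /* covers a 16MB toy address space */

    static uint8_t dirty_flags[NPAGES];

    /* Called from the slow write path once per page after re-arming. */
    static void set_dirty(uint64_t addr, uint8_t flags)
    {
        dirty_flags[addr >> PAGE_BITS] |= flags;
    }

    /* Clear 'flags' for every page in [start, end); afterwards a store
     * must trap once per page to mark it dirty again. */
    static void reset_dirty_range(uint64_t start, uint64_t end, uint8_t flags)
    {
        for (uint64_t a = start; a < end; a += 1u << PAGE_BITS) {
            dirty_flags[a >> PAGE_BITS] &= (uint8_t)~flags;
        }
    }
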
*/ - if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1 - != (end - 1) - start) { - abort(); - } - - for(env = first_cpu; env != NULL; env = env->next_cpu) { - int mmu_idx; - for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { - for(i = 0; i < CPU_TLB_SIZE; i++) - tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], - start1, length); - } + if (tcg_enabled()) { + tlb_reset_dirty_range_all(start, end, length); } } @@ -2086,137 +1866,25 @@ int cpu_physical_memory_set_dirty_tracking(int enable) return ret; } -static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry) -{ - ram_addr_t ram_addr; - void *p; - - if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) { - p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK) - + tlb_entry->addend); - ram_addr = qemu_ram_addr_from_host_nofail(p); - if (!cpu_physical_memory_is_dirty(ram_addr)) { - tlb_entry->addr_write |= TLB_NOTDIRTY; - } - } -} - -/* update the TLB according to the current state of the dirty bits */ -void cpu_tlb_update_dirty(CPUState *env) -{ - int i; - int mmu_idx; - for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { - for(i = 0; i < CPU_TLB_SIZE; i++) - tlb_update_dirty(&env->tlb_table[mmu_idx][i]); - } -} - -static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) -{ - if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) - tlb_entry->addr_write = vaddr; -} - -/* update the TLB corresponding to virtual page vaddr - so that it is no longer dirty */ -static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr) -{ - int i; - int mmu_idx; - - vaddr &= TARGET_PAGE_MASK; - i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) - tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); -} - -/* Our TLB does not support large pages, so remember the area covered by - large pages and trigger a full TLB flush if these are invalidated. */ -static void tlb_add_large_page(CPUState *env, target_ulong vaddr, - target_ulong size) -{ - target_ulong mask = ~(size - 1); - - if (env->tlb_flush_addr == (target_ulong)-1) { - env->tlb_flush_addr = vaddr & mask; - env->tlb_flush_mask = mask; - return; - } - /* Extend the existing region to include the new page. - This is a compromise between unnecessary flushes and the cost - of maintaining a full variable size TLB. */ - mask &= env->tlb_flush_mask; - while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) { - mask <<= 1; - } - env->tlb_flush_addr &= mask; - env->tlb_flush_mask = mask; -} - -static bool is_ram_rom(MemoryRegionSection *s) -{ - return memory_region_is_ram(s->mr); -} - -static bool is_romd(MemoryRegionSection *s) -{ - MemoryRegion *mr = s->mr; - - return mr->rom_device && mr->readable; -} - -static bool is_ram_rom_romd(MemoryRegionSection *s) -{ - return is_ram_rom(s) || is_romd(s); -} - -/* Add a new TLB entry. At most one entry for a given virtual address - is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the - supplied size is only used by tlb_flush_page. 
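
tlb_add_large_page(), shown above as it moves out of exec.c, tracks every large page with a single (addr, mask) pair and widens the mask until it covers both the existing region and the new page, trading occasional over-flushing for O(1) state. The widening step extracted as a standalone sketch:

    #include <stdint.h>

    static uint64_t flush_addr = (uint64_t)-1;   /* -1: nothing tracked */
    static uint64_t flush_mask;

    static void track_large_page(uint64_t vaddr, uint64_t size)
    {
        uint64_t mask = ~(size - 1);             /* size: power of two */

        if (flush_addr == (uint64_t)-1) {        /* first large page */
            flush_addr = vaddr & mask;
            flush_mask = mask;
            return;
        }
        /* Widen until the old region and the new page share the same
         * masked prefix; each shift doubles the covered area. */
        mask &= flush_mask;
        while (((flush_addr ^ vaddr) & mask) != 0) {
            mask <<= 1;
        }
        flush_addr &= mask;
        flush_mask = mask;
    }
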
*/ -void tlb_set_page(CPUState *env, target_ulong vaddr, - target_phys_addr_t paddr, int prot, - int mmu_idx, target_ulong size) +target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env, + MemoryRegionSection *section, + target_ulong vaddr, + target_phys_addr_t paddr, + int prot, + target_ulong *address) { - MemoryRegionSection *section; - unsigned int index; - target_ulong address; - target_ulong code_address; - unsigned long addend; - CPUTLBEntry *te; - CPUWatchpoint *wp; target_phys_addr_t iotlb; + CPUWatchpoint *wp; - assert(size >= TARGET_PAGE_SIZE); - if (size != TARGET_PAGE_SIZE) { - tlb_add_large_page(env, vaddr, size); - } - section = phys_page_find(paddr >> TARGET_PAGE_BITS); -#if defined(DEBUG_TLB) - printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx - " prot=%x idx=%d pd=0x%08lx\n", - vaddr, paddr, prot, mmu_idx, pd); -#endif - - address = vaddr; - if (!is_ram_rom_romd(section)) { - /* IO memory case (romd handled later) */ - address |= TLB_MMIO; - } - if (is_ram_rom_romd(section)) { - addend = (unsigned long)memory_region_get_ram_ptr(section->mr) - + section_addr(section, paddr); - } else { - addend = 0; - } - if (is_ram_rom(section)) { + if (memory_region_is_ram(section->mr)) { /* Normal RAM. */ iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + section_addr(section, paddr); - if (!section->readonly) + + memory_region_section_addr(section, paddr); + if (!section->readonly) { iotlb |= phys_section_notdirty; - else + } else { iotlb |= phys_section_rom; + } } else { /* IO handlers are currently passed a physical address. It would be nice to pass an offset from the base address @@ -2225,10 +1893,9 @@ void tlb_set_page(CPUState *env, target_ulong vaddr, We can't use the high bits of pd for this because IO_MEM_ROMD uses these as a ram address. */ iotlb = section - phys_sections; - iotlb += section_addr(section, paddr); + iotlb += memory_region_section_addr(section, paddr); } - code_address = address; /* Make accesses to pages with watchpoints go via the watchpoint trap routines. */ QTAILQ_FOREACH(wp, &env->watchpoints, entry) { @@ -2236,55 +1903,16 @@ void tlb_set_page(CPUState *env, target_ulong vaddr, /* Avoid trapping reads of pages with a write breakpoint. */ if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { iotlb = phys_section_watch + paddr; - address |= TLB_MMIO; + *address |= TLB_MMIO; break; } } } - index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - env->iotlb[mmu_idx][index] = iotlb - vaddr; - te = &env->tlb_table[mmu_idx][index]; - te->addend = addend - vaddr; - if (prot & PAGE_READ) { - te->addr_read = address; - } else { - te->addr_read = -1; - } - - if (prot & PAGE_EXEC) { - te->addr_code = code_address; - } else { - te->addr_code = -1; - } - if (prot & PAGE_WRITE) { - if ((memory_region_is_ram(section->mr) && section->readonly) - || is_romd(section)) { - /* Write access calls the I/O callback. */ - te->addr_write = address | TLB_MMIO; - } else if (memory_region_is_ram(section->mr) - && !cpu_physical_memory_is_dirty( - section->mr->ram_addr - + section_addr(section, paddr))) { - te->addr_write = address | TLB_NOTDIRTY; - } else { - te->addr_write = address; - } - } else { - te->addr_write = -1; - } + return iotlb; } #else - -void tlb_flush(CPUState *env, int flush_global) -{ -} - -void tlb_flush_page(CPUState *env, target_ulong addr) -{ -} - /* * Walks guest process memory "regions" one by one * and calls callback function 'fn' for each region. 
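
The walk_memory_regions() machinery introduced by the comment above reports maximal runs of pages sharing the same protection, calling 'fn' once per run. A toy walker over a flat page-protection array demonstrating the same coalescing contract; types and sizes are illustrative.

    #include <stdint.h>

    typedef int (*walk_fn)(void *priv, uint64_t start, uint64_t end,
                           int prot);

    static int walk_regions(const int *prot, unsigned npages,
                            uint64_t page_size, walk_fn fn, void *priv)
    {
        unsigned i, run_start = 0;

        for (i = 1; i <= npages; i++) {
            /* close the run at a protection change or at the end */
            if (i == npages || prot[i] != prot[run_start]) {
                if (prot[run_start] != 0) {
                    int rc = fn(priv, run_start * page_size,
                                i * page_size, prot[run_start]);
                    if (rc != 0) {
                        return rc;   /* nonzero aborts the walk */
                    }
                }
                run_start = i;
            }
        }
        return 0;
    }
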
@@ -2294,7 +1922,7 @@ struct walk_memory_regions_data { walk_memory_regions_fn fn; void *priv; - unsigned long start; + uintptr_t start; int prot; }; @@ -2355,7 +1983,7 @@ static int walk_memory_regions_1(struct walk_memory_regions_data *data, int walk_memory_regions(void *priv, walk_memory_regions_fn fn) { struct walk_memory_regions_data data; - unsigned long i; + uintptr_t i; data.fn = fn; data.priv = priv; @@ -2496,7 +2124,7 @@ int page_check_range(target_ulong start, target_ulong len, int flags) /* called from signal handler: invalidate the code and unprotect the page. Return TRUE if the fault was successfully handled. */ -int page_unprotect(target_ulong address, unsigned long pc, void *puc) +int page_unprotect(target_ulong address, uintptr_t pc, void *puc) { unsigned int prot; PageDesc *p; @@ -2541,11 +2169,6 @@ int page_unprotect(target_ulong address, unsigned long pc, void *puc) mmap_unlock(); return 0; } - -static inline void tlb_set_dirty(CPUState *env, - unsigned long addr, target_ulong vaddr) -{ -} #endif /* defined(CONFIG_USER_ONLY) */ #if !defined(CONFIG_USER_ONLY) @@ -2615,14 +2238,6 @@ static void phys_sections_clear(void) phys_sections_nb = 0; } -/* register physical memory. - For RAM, 'size' must be a multiple of the target page size. - If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an - io memory page. The address used when calling the IO function is - the offset from the start of the region, plus region_offset. Both - start_addr and region_offset are rounded down to a page boundary - before calculating this offset. This should not be a problem unless - the low bits of start_addr and region_offset differ. */ static void register_subpage(MemoryRegionSection *section) { subpage_t *subpage; @@ -2646,7 +2261,7 @@ static void register_subpage(MemoryRegionSection *section) subpage = container_of(existing->mr, subpage_t, iomem); } start = section->offset_within_address_space & ~TARGET_PAGE_MASK; - end = start + section->size; + end = start + section->size - 1; subpage_register(subpage, start, end, phys_section_add(section)); } @@ -2680,10 +2295,15 @@ void cpu_register_physical_memory_log(MemoryRegionSection *section, remain.offset_within_address_space += now.size; remain.offset_within_region += now.size; } - now = remain; - now.size &= TARGET_PAGE_MASK; - if (now.size) { - register_multipage(&now); + while (remain.size >= TARGET_PAGE_SIZE) { + now = remain; + if (remain.offset_within_region & ~TARGET_PAGE_MASK) { + now.size = TARGET_PAGE_SIZE; + register_subpage(&now); + } else { + now.size &= TARGET_PAGE_MASK; + register_multipage(&now); + } remain.size -= now.size; remain.offset_within_address_space += now.size; remain.offset_within_region += now.size; @@ -2853,6 +2473,24 @@ static ram_addr_t last_ram_offset(void) return last; } +static void qemu_ram_setup_dump(void *addr, ram_addr_t size) +{ + int ret; + QemuOpts *machine_opts; + + /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */ + machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0); + if (machine_opts && + !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) { + ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP); + if (ret) { + perror("qemu_madvise"); + fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, " + "but dump_guest_core=off specified\n"); + } + } +} + void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev) { RAMBlock *new_block, *block; @@ -2867,8 +2505,8 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev) 
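
qemu_ram_setup_dump() above keys off the dump-guest-core machine option; the host mechanism behind it is madvise(MADV_DONTDUMP), which excludes guest RAM from core dumps so a crashing multi-gigabyte VM does not produce a multi-gigabyte core file. A Linux-only sketch with the option plumbing reduced to a parameter:

    #include <stdio.h>
    #include <sys/mman.h>

    static void exclude_from_coredump(void *addr, size_t size, int dump_off)
    {
        if (!dump_off) {
            return;                 /* user wants guest RAM in cores */
        }
    #ifdef MADV_DONTDUMP
        if (madvise(addr, size, MADV_DONTDUMP) != 0) {
            perror("madvise(MADV_DONTDUMP)");   /* non-fatal, as above */
        }
    #endif
    }
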
assert(new_block); assert(!new_block->idstr[0]); - if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { - char *id = dev->parent_bus->info->get_dev_path(dev); + if (dev) { + char *id = qdev_get_dev_path(dev); if (id) { snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); g_free(id); @@ -2885,6 +2523,19 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev) } } +static int memory_try_enable_merging(void *addr, size_t len) +{ + QemuOpts *opts; + + opts = qemu_opts_find(qemu_find_opts("machine"), 0); + if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) { + /* disabled by the user */ + return 0; + } + + return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE); +} + ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, MemoryRegion *mr) { @@ -2904,34 +2555,22 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, new_block->host = file_ram_alloc(new_block, size, mem_path); if (!new_block->host) { new_block->host = qemu_vmalloc(size); - qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE); + memory_try_enable_merging(new_block->host, size); } #else fprintf(stderr, "-mem-path option unsupported\n"); exit(1); #endif } else { -#if defined(TARGET_S390X) && defined(CONFIG_KVM) - /* S390 KVM requires the topmost vma of the RAM to be smaller than - an system defined value, which is at least 256GB. Larger systems - have larger values. We put the guest between the end of data - segment (system break) and this value. We use 32GB as a base to - have enough room for the system break to grow. */ - new_block->host = mmap((void*)0x800000000, size, - PROT_EXEC|PROT_READ|PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); - if (new_block->host == MAP_FAILED) { - fprintf(stderr, "Allocating RAM failed\n"); - abort(); - } -#else if (xen_enabled()) { xen_ram_alloc(new_block->offset, size, mr); + } else if (kvm_enabled()) { + /* some s390/kvm configurations have special constraints */ + new_block->host = kvm_vmalloc(size); } else { new_block->host = qemu_vmalloc(size); } -#endif - qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE); + memory_try_enable_merging(new_block->host, size); } } new_block->length = size; @@ -2941,7 +2580,10 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, ram_list.phys_dirty = g_realloc(ram_list.phys_dirty, last_ram_offset() >> TARGET_PAGE_BITS); memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS), - 0xff, size >> TARGET_PAGE_BITS); + 0, size >> TARGET_PAGE_BITS); + cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff); + + qemu_ram_setup_dump(new_block->host, size); if (kvm_enabled()) kvm_setup_guest_memory(new_block->host, size); @@ -3058,7 +2700,8 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) length, addr); exit(1); } - qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE); + memory_try_enable_merging(vaddr, length); + qemu_ram_setup_dump(vaddr, length); } return; } @@ -3299,7 +2942,7 @@ static const MemoryRegionOps notdirty_mem_ops = { /* Generate a debug exception if a watchpoint has been hit. 
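
memory_try_enable_merging() above makes KSM participation conditional on -machine mem-merge (default on); the mechanism is madvise(MADV_MERGEABLE), which lets the kernel's same-page-merging thread deduplicate identical anonymous pages across guests. A sketch with the option lookup reduced to a boolean:

    #include <sys/mman.h>

    static int try_enable_merging(void *addr, size_t len, int user_allows)
    {
        if (!user_allows) {
            return 0;               /* -machine mem-merge=off */
        }
    #ifdef MADV_MERGEABLE
        return madvise(addr, len, MADV_MERGEABLE);
    #else
        return 0;                   /* host has no KSM support */
    #endif
    }
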
*/ static void check_watchpoint(int offset, int len_mask, int flags) { - CPUState *env = cpu_single_env; + CPUArchState *env = cpu_single_env; target_ulong pc, cs_base; TranslationBlock *tb; target_ulong vaddr; @@ -3544,7 +3187,7 @@ static void core_begin(MemoryListener *listener) static void core_commit(MemoryListener *listener) { - CPUState *env; + CPUArchState *env; /* since each CPU stores ram addresses in its TLB cache, we must reset the modified entries */ @@ -3598,13 +3241,13 @@ static void core_log_global_stop(MemoryListener *listener) static void core_eventfd_add(MemoryListener *listener, MemoryRegionSection *section, - bool match_data, uint64_t data, int fd) + bool match_data, uint64_t data, EventNotifier *e) { } static void core_eventfd_del(MemoryListener *listener, MemoryRegionSection *section, - bool match_data, uint64_t data, int fd) + bool match_data, uint64_t data, EventNotifier *e) { } @@ -3664,13 +3307,13 @@ static void io_log_global_stop(MemoryListener *listener) static void io_eventfd_add(MemoryListener *listener, MemoryRegionSection *section, - bool match_data, uint64_t data, int fd) + bool match_data, uint64_t data, EventNotifier *e) { } static void io_eventfd_del(MemoryListener *listener, MemoryRegionSection *section, - bool match_data, uint64_t data, int fd) + bool match_data, uint64_t data, EventNotifier *e) { } @@ -3734,7 +3377,7 @@ MemoryRegion *get_system_io(void) /* physical memory access (slow version, mainly for debug) */ #if defined(CONFIG_USER_ONLY) -int cpu_memory_rw_debug(CPUState *env, target_ulong addr, +int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, uint8_t *buf, int len, int is_write) { int l, flags; @@ -3793,7 +3436,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, if (is_write) { if (!memory_region_is_ram(section->mr)) { target_phys_addr_t addr1; - addr1 = section_addr(section, addr); + addr1 = memory_region_section_addr(section, addr); /* XXX: could force cpu_single_env to NULL to avoid potential bugs */ if (l >= 4 && ((addr1 & 3) == 0)) { @@ -3815,7 +3458,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, } else if (!section->readonly) { ram_addr_t addr1; addr1 = memory_region_get_ram_addr(section->mr) - + section_addr(section, addr); + + memory_region_section_addr(section, addr); /* RAM case */ ptr = qemu_get_ram_ptr(addr1); memcpy(ptr, buf, l); @@ -3829,10 +3472,11 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, qemu_put_ram_ptr(ptr); } } else { - if (!is_ram_rom_romd(section)) { + if (!(memory_region_is_ram(section->mr) || + memory_region_is_romd(section->mr))) { target_phys_addr_t addr1; /* I/O case */ - addr1 = section_addr(section, addr); + addr1 = memory_region_section_addr(section, addr); if (l >= 4 && ((addr1 & 3) == 0)) { /* 32 bit read access */ val = io_mem_read(section->mr, addr1, 4); @@ -3851,8 +3495,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, } } else { /* RAM case */ - ptr = qemu_get_ram_ptr(section->mr->ram_addr) - + section_addr(section, addr); + ptr = qemu_get_ram_ptr(section->mr->ram_addr + + memory_region_section_addr(section, + addr)); memcpy(buf, ptr, l); qemu_put_ram_ptr(ptr); } @@ -3879,15 +3524,23 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr, l = len; section = phys_page_find(page >> TARGET_PAGE_BITS); - if (!is_ram_rom_romd(section)) { + if (!(memory_region_is_ram(section->mr) || + memory_region_is_romd(section->mr))) { /* do nothing */ } else { unsigned long addr1; addr1 = 
memory_region_get_ram_addr(section->mr) - + section_addr(section, addr); + + memory_region_section_addr(section, addr); /* ROM/RAM case */ ptr = qemu_get_ram_ptr(addr1); memcpy(ptr, buf, l); + if (!cpu_physical_memory_is_dirty(addr1)) { + /* invalidate code */ + tb_invalidate_phys_page_range(addr1, addr1 + l, 0); + /* set dirty bit */ + cpu_physical_memory_set_dirty_flags( + addr1, (0xff & ~CODE_DIRTY_FLAG)); + } qemu_put_ram_ptr(ptr); } len -= l; @@ -3985,7 +3638,7 @@ void *cpu_physical_memory_map(target_phys_addr_t addr, } if (!todo) { raddr = memory_region_get_ram_addr(section->mr) - + section_addr(section, addr); + + memory_region_section_addr(section, addr); } len -= l; @@ -4047,9 +3700,10 @@ static inline uint32_t ldl_phys_internal(target_phys_addr_t addr, section = phys_page_find(addr >> TARGET_PAGE_BITS); - if (!is_ram_rom_romd(section)) { + if (!(memory_region_is_ram(section->mr) || + memory_region_is_romd(section->mr))) { /* I/O case */ - addr = section_addr(section, addr); + addr = memory_region_section_addr(section, addr); val = io_mem_read(section->mr, addr, 4); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { @@ -4064,7 +3718,7 @@ static inline uint32_t ldl_phys_internal(target_phys_addr_t addr, /* RAM case */ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + section_addr(section, addr)); + + memory_region_section_addr(section, addr)); switch (endian) { case DEVICE_LITTLE_ENDIAN: val = ldl_le_p(ptr); @@ -4105,9 +3759,10 @@ static inline uint64_t ldq_phys_internal(target_phys_addr_t addr, section = phys_page_find(addr >> TARGET_PAGE_BITS); - if (!is_ram_rom_romd(section)) { + if (!(memory_region_is_ram(section->mr) || + memory_region_is_romd(section->mr))) { /* I/O case */ - addr = section_addr(section, addr); + addr = memory_region_section_addr(section, addr); /* XXX This is broken when device endian != cpu endian. 
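
The ld*_phys_internal() helpers above select a little- or big-endian view of the same RAM bytes via ldl_le_p()/ldl_be_p(). For reference, such fixed-endian loads can be written portably as byte assemblies that work regardless of host endianness; a sketch, not QEMU's implementation:

    #include <stdint.h>

    /* Little-endian 32-bit load: byte 0 is least significant. */
    static inline uint32_t ldl_le(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    /* Big-endian 32-bit load: byte 0 is most significant. */
    static inline uint32_t ldl_be(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | (uint32_t)p[3];
    }
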
Fix and add "endian" variable check */ @@ -4122,7 +3777,7 @@ static inline uint64_t ldq_phys_internal(target_phys_addr_t addr, /* RAM case */ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + section_addr(section, addr)); + + memory_region_section_addr(section, addr)); switch (endian) { case DEVICE_LITTLE_ENDIAN: val = ldq_le_p(ptr); @@ -4171,9 +3826,10 @@ static inline uint32_t lduw_phys_internal(target_phys_addr_t addr, section = phys_page_find(addr >> TARGET_PAGE_BITS); - if (!is_ram_rom_romd(section)) { + if (!(memory_region_is_ram(section->mr) || + memory_region_is_romd(section->mr))) { /* I/O case */ - addr = section_addr(section, addr); + addr = memory_region_section_addr(section, addr); val = io_mem_read(section->mr, addr, 2); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { @@ -4188,7 +3844,7 @@ static inline uint32_t lduw_phys_internal(target_phys_addr_t addr, /* RAM case */ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + section_addr(section, addr)); + + memory_region_section_addr(section, addr)); switch (endian) { case DEVICE_LITTLE_ENDIAN: val = lduw_le_p(ptr); @@ -4230,7 +3886,7 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) section = phys_page_find(addr >> TARGET_PAGE_BITS); if (!memory_region_is_ram(section->mr) || section->readonly) { - addr = section_addr(section, addr); + addr = memory_region_section_addr(section, addr); if (memory_region_is_ram(section->mr)) { section = &phys_sections[phys_section_rom]; } @@ -4238,7 +3894,7 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) } else { unsigned long addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + section_addr(section, addr); + + memory_region_section_addr(section, addr); ptr = qemu_get_ram_ptr(addr1); stl_p(ptr, val); @@ -4262,7 +3918,7 @@ void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val) section = phys_page_find(addr >> TARGET_PAGE_BITS); if (!memory_region_is_ram(section->mr) || section->readonly) { - addr = section_addr(section, addr); + addr = memory_region_section_addr(section, addr); if (memory_region_is_ram(section->mr)) { section = &phys_sections[phys_section_rom]; } @@ -4276,7 +3932,7 @@ void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val) } else { ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + section_addr(section, addr)); + + memory_region_section_addr(section, addr)); stq_p(ptr, val); } } @@ -4291,7 +3947,7 @@ static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val, section = phys_page_find(addr >> TARGET_PAGE_BITS); if (!memory_region_is_ram(section->mr) || section->readonly) { - addr = section_addr(section, addr); + addr = memory_region_section_addr(section, addr); if (memory_region_is_ram(section->mr)) { section = &phys_sections[phys_section_rom]; } @@ -4308,7 +3964,7 @@ static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val, } else { unsigned long addr1; addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + section_addr(section, addr); + + memory_region_section_addr(section, addr); /* RAM case */ ptr = qemu_get_ram_ptr(addr1); switch (endian) { @@ -4364,7 +4020,7 @@ static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val, section = phys_page_find(addr >> TARGET_PAGE_BITS); if (!memory_region_is_ram(section->mr) || section->readonly) { - addr = section_addr(section, addr); + addr = 
memory_region_section_addr(section, addr); if (memory_region_is_ram(section->mr)) { section = &phys_sections[phys_section_rom]; } @@ -4381,7 +4037,7 @@ static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val, } else { unsigned long addr1; addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + section_addr(section, addr); + + memory_region_section_addr(section, addr); /* RAM case */ ptr = qemu_get_ram_ptr(addr1); switch (endian) { @@ -4440,7 +4096,7 @@ void stq_be_phys(target_phys_addr_t addr, uint64_t val) } /* virtual memory access for debug (includes writing to ROM) */ -int cpu_memory_rw_debug(CPUState *env, target_ulong addr, +int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr, uint8_t *buf, int len, int is_write) { int l; @@ -4471,20 +4127,20 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr, /* in deterministic execution mode, instructions doing device I/Os must be at the end of the TB */ -void cpu_io_recompile(CPUState *env, void *retaddr) +void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr) { TranslationBlock *tb; uint32_t n, cflags; target_ulong pc, cs_base; uint64_t flags; - tb = tb_find_pc((unsigned long)retaddr); + tb = tb_find_pc(retaddr); if (!tb) { cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", - retaddr); + (void *)retaddr); } n = env->icount_decr.u16.low + tb->icount; - cpu_restore_state(tb, env, (unsigned long)retaddr); + cpu_restore_state(tb, env, retaddr); /* Calculate how many instructions had been executed before the fault occurred. */ n = n - env->icount_decr.u16.low; @@ -4582,35 +4238,6 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) tcg_dump_info(f, cpu_fprintf); } -/* NOTE: this function can trigger an exception */ -/* NOTE2: the returned address is not exactly the physical address: it - is the offset relative to phys_ram_base */ -tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr) -{ - int mmu_idx, page_index, pd; - void *p; - MemoryRegion *mr; - - page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - mmu_idx = cpu_mmu_index(env1); - if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code != - (addr & TARGET_PAGE_MASK))) { - ldub_code(addr); - } - pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK; - mr = iotlb_to_region(pd); - if (mr != &io_mem_ram && mr != &io_mem_rom - && mr != &io_mem_notdirty && !mr->rom_device) { -#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC) - cpu_unassigned_access(env1, addr, 0, 1, 0, 4); -#else - cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr); -#endif - } - p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend); - return qemu_ram_addr_from_host_nofail(p); -} - /* * A helper function for the _utterly broken_ virtio device model to find out if * it's running on a big endian machine. Don't do this at home kids! 
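
virtio_is_big_endian() above is decided at build time by TARGET_WORDS_BIGENDIAN. The equivalent runtime probe, shown only to illustrate what the macro encodes, is the classic union trick:

    #include <stdbool.h>
    #include <stdint.h>

    static bool host_is_big_endian(void)
    {
        const union { uint32_t w; uint8_t b[4]; } probe = { .w = 0x01020304 };
        return probe.b[0] == 0x01;   /* MSB stored first => big-endian */
    }
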
@@ -4625,24 +4252,16 @@ bool virtio_is_big_endian(void) #endif } -#define MMUSUFFIX _cmmu -#undef GETPC -#define GETPC() NULL -#define env cpu_single_env -#define SOFTMMU_CODE_ACCESS - -#define SHIFT 0 -#include "softmmu_template.h" - -#define SHIFT 1 -#include "softmmu_template.h" - -#define SHIFT 2 -#include "softmmu_template.h" +#endif -#define SHIFT 3 -#include "softmmu_template.h" +#ifndef CONFIG_USER_ONLY +bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr) +{ + MemoryRegionSection *section; -#undef env + section = phys_page_find(phys_addr >> TARGET_PAGE_BITS); + return !(memory_region_is_ram(section->mr) || + memory_region_is_romd(section->mr)); +} #endif
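
The cpu_physical_memory_is_io() predicate added at the end gives callers, such as a guest-memory dumper, a cheap way to avoid touching device regions. A hypothetical consumer; the predicate is only declared here and the loop is illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_PAGE_SIZE 4096ULL

    bool cpu_physical_memory_is_io(uint64_t phys_addr);

    /* Walk [start, end) page by page, skipping MMIO: reading device
     * registers can have side effects, RAM/ROM pages are safe to copy. */
    static void dump_ram_only(uint64_t start, uint64_t end)
    {
        uint64_t a;

        for (a = start & ~(TOY_PAGE_SIZE - 1); a < end; a += TOY_PAGE_SIZE) {
            if (cpu_physical_memory_is_io(a)) {
                continue;
            }
            /* ... read or copy the page here ... */
        }
    }
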