X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/4f25ac5f425d435d0c841ab876adfb541cb521a3..8f16753fd6b584f34d47e102d290dbf4d5c94d46:/exec.c

diff --git a/exec.c b/exec.c
index db9ff5515a..983c0db3f7 100644
--- a/exec.c
+++ b/exec.c
@@ -517,7 +517,8 @@ static void code_gen_alloc(unsigned long tb_size)
         exit(1);
     }
 }
-#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
+    || defined(__DragonFly__) || defined(__OpenBSD__)
     {
         int flags;
         void *addr = NULL;
@@ -530,6 +531,13 @@ static void code_gen_alloc(unsigned long tb_size)
         /* Cannot map more than that */
         if (code_gen_buffer_size > (800 * 1024 * 1024))
             code_gen_buffer_size = (800 * 1024 * 1024);
+#elif defined(__sparc_v9__)
+        // Map the buffer below 2G, so we can use direct calls and branches
+        flags |= MAP_FIXED;
+        addr = (void *) 0x60000000UL;
+        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
+            code_gen_buffer_size = (512 * 1024 * 1024);
+        }
 #endif
         code_gen_buffer = mmap(addr, code_gen_buffer_size,
                                PROT_WRITE | PROT_READ | PROT_EXEC,
@@ -630,6 +638,9 @@ void cpu_exec_init(CPUState *env)
     env->numa_node = 0;
     QTAILQ_INIT(&env->breakpoints);
     QTAILQ_INIT(&env->watchpoints);
+#ifndef CONFIG_USER_ONLY
+    env->thread_id = qemu_get_thread_id();
+#endif
     *penv = env;
 #if defined(CONFIG_USER_ONLY)
     cpu_list_unlock();
@@ -641,6 +652,32 @@ void cpu_exec_init(CPUState *env)
 #endif
 }
 
+/* Allocate a new translation block. Flush the translation buffer if
+   too many translation blocks or too much generated code. */
+static TranslationBlock *tb_alloc(target_ulong pc)
+{
+    TranslationBlock *tb;
+
+    if (nb_tbs >= code_gen_max_blocks ||
+        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
+        return NULL;
+    tb = &tbs[nb_tbs++];
+    tb->pc = pc;
+    tb->cflags = 0;
+    return tb;
+}
+
+void tb_free(TranslationBlock *tb)
+{
+    /* In practice this is mostly used for single use temporary TB
+       Ignore the hard cases and just back up if this TB happens to
+       be the last one generated.  */
+    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
+        code_gen_ptr = tb->tc_ptr;
+        nb_tbs--;
+    }
+}
+
 static inline void invalidate_page_bitmap(PageDesc *p)
 {
     if (p->code_bitmap) {
@@ -1218,32 +1255,6 @@ static inline void tb_alloc_page(TranslationBlock *tb,
 #endif /* TARGET_HAS_SMC */
 }
 
-/* Allocate a new translation block. Flush the translation buffer if
-   too many translation blocks or too much generated code. */
-TranslationBlock *tb_alloc(target_ulong pc)
-{
-    TranslationBlock *tb;
-
-    if (nb_tbs >= code_gen_max_blocks ||
-        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
-        return NULL;
-    tb = &tbs[nb_tbs++];
-    tb->pc = pc;
-    tb->cflags = 0;
-    return tb;
-}
-
-void tb_free(TranslationBlock *tb)
-{
-    /* In practice this is mostly used for single use temporary TB
-       Ignore the hard cases and just back up if this TB happens to
-       be the last one generated.  */
-    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
-        code_gen_ptr = tb->tc_ptr;
-        nb_tbs--;
-    }
-}
-
 /* add a new TB and link it to the physical page tables. phys_page2 is
    (-1) to indicate that only one page contains the TB. */
 void tb_link_page(TranslationBlock *tb,
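The tb_alloc()/tb_free() pair moved above is a bump allocator over a fixed arena: tb_alloc() hands out the next slot and advances nb_tbs, while tb_free() can only roll back the single most recent allocation; anything older waits for a wholesale flush. A minimal standalone sketch of the same pattern, with illustrative names (Block, arena_ptr) rather than QEMU's:

/* Sketch of the bump-allocate / last-only-free pattern used by
 * tb_alloc()/tb_free().  All names are illustrative. */
#include <stddef.h>

#define MAX_BLOCKS 1024

typedef struct Block {
    size_t mark;                  /* rollback point, like tb->tc_ptr */
} Block;

static Block blocks[MAX_BLOCKS];
static size_t nb_blocks;          /* like nb_tbs */
static size_t arena_ptr;          /* like code_gen_ptr */

static Block *block_alloc(size_t code_size)
{
    if (nb_blocks >= MAX_BLOCKS) {
        return NULL;              /* caller must flush everything and retry */
    }
    Block *b = &blocks[nb_blocks++];
    b->mark = arena_ptr;
    arena_ptr += code_size;       /* eager; QEMU advances code_gen_ptr after
                                     translation instead */
    return b;
}

static void block_free(Block *b)
{
    /* Only the most recently allocated block can be reclaimed;
     * older blocks are left for the next wholesale flush. */
    if (nb_blocks > 0 && b == &blocks[nb_blocks - 1]) {
        arena_ptr = b->mark;
        nb_blocks--;
    }
}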
@@ -1632,7 +1643,7 @@ void cpu_interrupt(CPUState *env, int mask)
      * If called from iothread context, wake the target cpu in
      * case its halted.
      */
-    if (!qemu_cpu_self(env)) {
+    if (!qemu_cpu_is_self(env)) {
         qemu_cpu_kick(env);
         return;
     }
@@ -2030,10 +2041,10 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
 
     /* we modify the TLB cache so that the dirty bit will be set again
        when accessing the range */
-    start1 = (unsigned long)qemu_get_ram_ptr(start);
+    start1 = (unsigned long)qemu_safe_ram_ptr(start);
     /* Chek that we don't span multiple blocks - this breaks the
       address comparisons below.  */
-    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
+    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
         != (end - 1) - start) {
         abort();
     }
@@ -2070,6 +2081,36 @@ int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
     return ret;
 }
 
+int cpu_physical_log_start(target_phys_addr_t start_addr,
+                           ram_addr_t size)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        if (client->log_start) {
+            int r = client->log_start(client, start_addr, size);
+            if (r < 0) {
+                return r;
+            }
+        }
+    }
+    return 0;
+}
+
+int cpu_physical_log_stop(target_phys_addr_t start_addr,
+                          ram_addr_t size)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        if (client->log_stop) {
+            int r = client->log_stop(client, start_addr, size);
+            if (r < 0) {
+                return r;
+            }
+        }
+    }
+    return 0;
+}
+
 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
 {
     ram_addr_t ram_addr;
@@ -2570,6 +2611,7 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
     ram_addr_t orig_size = size;
     subpage_t *subpage;
 
+    assert(size);
     cpu_notify_set_memory(start_addr, size, phys_offset);
 
     if (phys_offset == IO_MEM_UNASSIGNED) {
@@ -2578,7 +2620,9 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
     region_offset &= TARGET_PAGE_MASK;
     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
     end_addr = start_addr + (target_phys_addr_t)size;
-    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
+
+    addr = start_addr;
+    do {
         p = phys_page_find(addr >> TARGET_PAGE_BITS);
         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
             ram_addr_t orig_memory = p->phys_offset;
@@ -2630,7 +2674,8 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
             }
         }
         region_offset += TARGET_PAGE_SIZE;
-    }
+        addr += TARGET_PAGE_SIZE;
+    } while (addr != end_addr);
 
     /* since each CPU stores ram addresses in its TLB cache, we must
        reset the modified entries */
@@ -2829,6 +2874,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
 
     if (host) {
         new_block->host = host;
+        new_block->flags |= RAM_PREALLOC_MASK;
     } else {
         if (mem_path) {
 #if defined (__linux__) && !defined(TARGET_S390X)
@@ -2882,7 +2928,9 @@ void qemu_ram_free(ram_addr_t addr)
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
             QLIST_REMOVE(block, next);
-            if (mem_path) {
+            if (block->flags & RAM_PREALLOC_MASK) {
+                ;
+            } else if (mem_path) {
 #if defined (__linux__) && !defined(TARGET_S390X)
                 if (block->fd) {
                     munmap(block->host, block->length);
@@ -2890,6 +2938,8 @@ void qemu_ram_free(ram_addr_t addr)
                 } else {
                     qemu_vfree(block->host);
                 }
+#else
+                abort();
 #endif
             } else {
 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
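The RAM_PREALLOC_MASK flag introduced above encodes an ownership rule: qemu_ram_alloc_from_ptr() marks blocks whose backing memory was supplied by the caller, and qemu_ram_free() (and qemu_ram_remap() below) deliberately leaves such memory alone, hence the bare ';' branch. A standalone sketch of that rule with simplified stand-in types:

/* Sketch of the ownership rule behind RAM_PREALLOC_MASK: blocks backed
 * by caller-supplied memory are tracked but never freed here. */
#include <stdlib.h>

#define PREALLOC_FLAG 0x1u          /* stand-in for RAM_PREALLOC_MASK */

typedef struct Block {
    void *host;
    unsigned flags;
} Block;

static Block block_new(void *caller_mem, size_t len)
{
    Block b = { NULL, 0 };
    if (caller_mem) {
        b.host = caller_mem;        /* caller keeps ownership */
        b.flags |= PREALLOC_FLAG;
    } else {
        b.host = malloc(len);       /* allocator owns this one */
    }
    return b;
}

static void block_free(Block *b)
{
    if (b->flags & PREALLOC_FLAG) {
        ;                           /* caller-owned: leave it alone */
    } else {
        free(b->host);
    }
    b->host = NULL;
}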
@@ -2905,6 +2955,66 @@ void qemu_ram_free(ram_addr_t addr)
 
 }
 
+#ifndef _WIN32
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
+{
+    RAMBlock *block;
+    ram_addr_t offset;
+    int flags;
+    void *area, *vaddr;
+
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        offset = addr - block->offset;
+        if (offset < block->length) {
+            vaddr = block->host + offset;
+            if (block->flags & RAM_PREALLOC_MASK) {
+                ;
+            } else {
+                flags = MAP_FIXED;
+                munmap(vaddr, length);
+                if (mem_path) {
+#if defined(__linux__) && !defined(TARGET_S390X)
+                    if (block->fd) {
+#ifdef MAP_POPULATE
+                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
+                            MAP_PRIVATE;
+#else
+                        flags |= MAP_PRIVATE;
+#endif
+                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+                                    flags, block->fd, offset);
+                    } else {
+                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
+                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+                                    flags, -1, 0);
+                    }
+#else
+                    abort();
+#endif
+                } else {
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+                    flags |= MAP_SHARED | MAP_ANONYMOUS;
+                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
+                                flags, -1, 0);
+#else
+                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
+                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+                                flags, -1, 0);
+#endif
+                }
+                if (area != vaddr) {
+                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
+                            length, addr);
+                    exit(1);
+                }
+                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
+            }
+            return;
+        }
+    }
+}
+#endif /* !_WIN32 */
+
 /* Return a host pointer to ram allocated with qemu_ram_alloc.
    With the exception of the softmmu code in this file, this should
    only be used for local memory (e.g. video ram) that the device owns,
@@ -2919,8 +3029,30 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
 
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr - block->offset < block->length) {
-            QLIST_REMOVE(block, next);
-            QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
+            /* Move this entry to to start of the list.  */
+            if (block != QLIST_FIRST(&ram_list.blocks)) {
+                QLIST_REMOVE(block, next);
+                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
+            }
+            return block->host + (addr - block->offset);
+        }
+    }
+
+    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+    abort();
+
+    return NULL;
+}
+
+/* Return a host pointer to ram allocated with qemu_ram_alloc.
+ * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
+ */
+void *qemu_safe_ram_ptr(ram_addr_t addr)
+{
+    RAMBlock *block;
+
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        if (addr - block->offset < block->length) {
             return block->host + (addr - block->offset);
         }
     }
@@ -3312,7 +3444,8 @@ static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
     mmio = qemu_mallocz(sizeof(subpage_t));
 
     mmio->base = base;
-    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
+    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
+                                            DEVICE_NATIVE_ENDIAN);
 #if defined(DEBUG_SUBPAGE)
     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
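qemu_get_ram_ptr() above speeds up repeated lookups by moving the matching RAMBlock to the head of the list, while the new qemu_safe_ram_ptr() performs the same search without reordering; that is why cpu_physical_memory_reset_dirty() now uses the safe variant while it compares host addresses across a range. A standalone sketch of the move-to-front lookup, using simplified stand-in types:

/* Sketch of the move-to-front (MRU) lookup used by qemu_get_ram_ptr():
 * a hit is re-linked at the head so hot blocks are found first. */
#include <stdint.h>
#include <stddef.h>

typedef struct Block Block;
struct Block {
    uint64_t offset, length;
    uint8_t *host;
    Block *next;
};

static Block *block_list;

static uint8_t *lookup_mru(uint64_t addr)
{
    Block **prevp = &block_list;
    Block *b;

    for (b = block_list; b; prevp = &b->next, b = b->next) {
        /* unsigned wraparound makes this a single range check,
           exactly as in the QEMU code above */
        if (addr - b->offset < b->length) {
            if (b != block_list) {      /* move the hit to the front */
                *prevp = b->next;
                b->next = block_list;
                block_list = b;
            }
            return b->host + (addr - b->offset);
        }
    }
    return NULL;                        /* caller aborts on a bad offset */
}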
@@ -3336,6 +3469,106 @@ static int get_free_io_mem_idx(void)
     return -1;
 }
 
+/*
+ * Usually, devices operate in little endian mode. There are devices out
+ * there that operate in big endian too. Each device gets byte swapped
+ * mmio if plugged onto a CPU that does the other endianness.
+ *
+ * CPU          Device           swap?
+ *
+ * little       little           no
+ * little       big              yes
+ * big          little           yes
+ * big          big              no
+ */
+
+typedef struct SwapEndianContainer {
+    CPUReadMemoryFunc *read[3];
+    CPUWriteMemoryFunc *write[3];
+    void *opaque;
+} SwapEndianContainer;
+
+static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
+{
+    uint32_t val;
+    SwapEndianContainer *c = opaque;
+    val = c->read[0](c->opaque, addr);
+    return val;
+}
+
+static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
+{
+    uint32_t val;
+    SwapEndianContainer *c = opaque;
+    val = bswap16(c->read[1](c->opaque, addr));
+    return val;
+}
+
+static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
+{
+    uint32_t val;
+    SwapEndianContainer *c = opaque;
+    val = bswap32(c->read[2](c->opaque, addr));
+    return val;
+}
+
+static CPUReadMemoryFunc * const swapendian_readfn[3]={
+    swapendian_mem_readb,
+    swapendian_mem_readw,
+    swapendian_mem_readl
+};
+
+static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
+                                  uint32_t val)
+{
+    SwapEndianContainer *c = opaque;
+    c->write[0](c->opaque, addr, val);
+}
+
+static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
+                                  uint32_t val)
+{
+    SwapEndianContainer *c = opaque;
+    c->write[1](c->opaque, addr, bswap16(val));
+}
+
+static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
+                                  uint32_t val)
+{
+    SwapEndianContainer *c = opaque;
+    c->write[2](c->opaque, addr, bswap32(val));
+}
+
+static CPUWriteMemoryFunc * const swapendian_writefn[3]={
+    swapendian_mem_writeb,
+    swapendian_mem_writew,
+    swapendian_mem_writel
+};
+
+static void swapendian_init(int io_index)
+{
+    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
+    int i;
+
+    /* Swap mmio for big endian targets */
+    c->opaque = io_mem_opaque[io_index];
+    for (i = 0; i < 3; i++) {
+        c->read[i] = io_mem_read[io_index][i];
+        c->write[i] = io_mem_write[io_index][i];
+
+        io_mem_read[io_index][i] = swapendian_readfn[i];
+        io_mem_write[io_index][i] = swapendian_writefn[i];
+    }
+    io_mem_opaque[io_index] = c;
+}
+
+static void swapendian_del(int io_index)
+{
+    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
+        qemu_free(io_mem_opaque[io_index]);
+    }
+}
+
 /* mem_read and mem_write are arrays of functions containing the
    function to access byte (index 0), word (index 1) and dword (index
    2). Functions can be omitted with a NULL function pointer.
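The table above reduces to a single rule: swap whenever the target CPU and the device disagree on byte order, which is what the SwapEndianContainer thunks implement by wrapping the original accessors in bswap16()/bswap32(). A minimal standalone illustration of that rule for a 16-bit read (local stand-ins, not QEMU's bswap helpers):

/* The swap decision from the table above: little/little and big/big
 * pass through; the mixed cases byte swap. */
#include <stdint.h>
#include <stdbool.h>

enum endian { ENDIAN_LITTLE, ENDIAN_BIG };

static bool mmio_needs_swap(enum endian cpu, enum endian dev)
{
    return cpu != dev;      /* little/big or big/little => swap */
}

static uint16_t bswap16_local(uint16_t v)   /* stand-in for bswap16() */
{
    return (uint16_t)((v >> 8) | (v << 8));
}

static uint16_t mmio_read16(uint16_t raw, enum endian cpu, enum endian dev)
{
    return mmio_needs_swap(cpu, dev) ? bswap16_local(raw) : raw;
}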
@@ -3346,7 +3579,7 @@ static int get_free_io_mem_idx(void)
 static int cpu_register_io_memory_fixed(int io_index,
                                         CPUReadMemoryFunc * const *mem_read,
                                         CPUWriteMemoryFunc * const *mem_write,
-                                        void *opaque)
+                                        void *opaque, enum device_endian endian)
 {
     int i;
 
@@ -3370,14 +3603,30 @@ static int cpu_register_io_memory_fixed(int io_index,
     }
     io_mem_opaque[io_index] = opaque;
 
+    switch (endian) {
+    case DEVICE_BIG_ENDIAN:
+#ifndef TARGET_WORDS_BIGENDIAN
+        swapendian_init(io_index);
+#endif
+        break;
+    case DEVICE_LITTLE_ENDIAN:
+#ifdef TARGET_WORDS_BIGENDIAN
+        swapendian_init(io_index);
+#endif
+        break;
+    case DEVICE_NATIVE_ENDIAN:
+    default:
+        break;
+    }
+
     return (io_index << IO_MEM_SHIFT);
 }
 
 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                            CPUWriteMemoryFunc * const *mem_write,
-                           void *opaque)
+                           void *opaque, enum device_endian endian)
 {
-    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
+    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
 }
 
 void cpu_unregister_io_memory(int io_table_address)
@@ -3385,6 +3634,8 @@ void cpu_unregister_io_memory(int io_table_address)
     int i;
     int io_index = io_table_address >> IO_MEM_SHIFT;
 
+    swapendian_del(io_index);
+
    for (i=0;i < 3; i++) {
         io_mem_read[io_index][i] = unassigned_mem_read[i];
         io_mem_write[io_index][i] = unassigned_mem_write[i];
@@ -3397,14 +3648,21 @@ static void io_mem_init(void)
 {
     int i;
 
-    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
-    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
-    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
+    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
+                                 unassigned_mem_write, NULL,
+                                 DEVICE_NATIVE_ENDIAN);
+    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
+                                 unassigned_mem_write, NULL,
+                                 DEVICE_NATIVE_ENDIAN);
+    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
+                                 notdirty_mem_write, NULL,
+                                 DEVICE_NATIVE_ENDIAN);
     for (i=0; i<5; i++)
         io_mem_used[i] = 1;
 
     io_mem_watch = cpu_register_io_memory(watch_mem_read,
-                                          watch_mem_write, NULL);
+                                          watch_mem_write, NULL,
+                                          DEVICE_NATIVE_ENDIAN);
 }
 
 #endif /* !defined(CONFIG_USER_ONLY) */
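With this change, every caller of cpu_register_io_memory() declares its device's endianness and the core installs the swapping thunks only when it differs from the target's. A usage sketch under the assumption that QEMU's usual headers (cpu-common.h) are included; the mydev_* names and the mapped address are hypothetical:

/* Hypothetical device MMIO handlers: byte, word, and long accessors. */
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t v) { }
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t v) { }
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t v) { }

static CPUReadMemoryFunc * const mydev_readfn[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc * const mydev_writefn[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_init_mmio(void *opaque)
{
    /* A little endian device: on a big endian target the core now
       interposes the swapendian thunks automatically. */
    int io = cpu_register_io_memory(mydev_readfn, mydev_writefn, opaque,
                                    DEVICE_LITTLE_ENDIAN);

    /* Map it as usual; the address here is purely illustrative. */
    cpu_register_physical_memory(0x10000000, 0x1000, io);
}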