/* Cannot map more than that */
if (code_gen_buffer_size > (800 * 1024 * 1024))
code_gen_buffer_size = (800 * 1024 * 1024);
+#elif defined(__sparc_v9__)
+ /* Map the buffer below 2G, so we can use direct calls and branches. */
+ flags |= MAP_FIXED;
+ addr = (void *) 0x60000000UL;
+ if (code_gen_buffer_size > (512 * 1024 * 1024)) {
+ code_gen_buffer_size = (512 * 1024 * 1024);
+ }
#endif
code_gen_buffer = mmap(addr, code_gen_buffer_size,
PROT_WRITE | PROT_READ | PROT_EXEC,
env->numa_node = 0;
QTAILQ_INIT(&env->breakpoints);
QTAILQ_INIT(&env->watchpoints);
+#ifndef CONFIG_USER_ONLY
+ env->thread_id = qemu_get_thread_id();
+#endif
*penv = env;
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
#endif
}
+/* Allocate a new translation block. Returns NULL when there are too many
+ translation blocks or too much generated code, i.e. when the translation
+ buffer needs to be flushed. */
+static TranslationBlock *tb_alloc(target_ulong pc)
+{
+ TranslationBlock *tb;
+
+ if (nb_tbs >= code_gen_max_blocks ||
+ (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
+ return NULL;
+ tb = &tbs[nb_tbs++];
+ tb->pc = pc;
+ tb->cflags = 0;
+ return tb;
+}
+
+void tb_free(TranslationBlock *tb)
+{
+ /* In practice this is mostly used for single-use temporary TBs.
+ Ignore the hard cases and just back up if this TB happens to
+ be the last one generated. */
+ if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
+ code_gen_ptr = tb->tc_ptr;
+ nb_tbs--;
+ }
+}
+
static inline void invalidate_page_bitmap(PageDesc *p)
{
if (p->code_bitmap) {
restore the CPU state */
current_tb_modified = 1;
- cpu_restore_state(current_tb, env,
- env->mem_io_pc, NULL);
+ cpu_restore_state(current_tb, env, env->mem_io_pc);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                     &current_flags);
}
restore the CPU state */
current_tb_modified = 1;
- cpu_restore_state(current_tb, env, pc, puc);
+ cpu_restore_state(current_tb, env, pc);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                     &current_flags);
}
#endif /* TARGET_HAS_SMC */
}
-/* Allocate a new translation block. Flush the translation buffer if
- too many translation blocks or too much generated code. */
-TranslationBlock *tb_alloc(target_ulong pc)
-{
- TranslationBlock *tb;
-
- if (nb_tbs >= code_gen_max_blocks ||
- (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
- return NULL;
- tb = &tbs[nb_tbs++];
- tb->pc = pc;
- tb->cflags = 0;
- return tb;
-}
-
-void tb_free(TranslationBlock *tb)
-{
- /* In practice this is mostly used for single use temporary TB
- Ignore the hard cases and just back up if this TB happens to
- be the last one generated. */
- if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
- code_gen_ptr = tb->tc_ptr;
- nb_tbs--;
- }
-}
-
/* add a new TB and link it to the physical page tables. phys_page2 is
(-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
* If called from iothread context, wake the target cpu in
* case it's halted.
*/
- if (!qemu_cpu_self(env)) {
+ if (!qemu_cpu_is_self(env)) {
qemu_cpu_kick(env);
return;
}
return ret;
}
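+/* Ask every registered physical memory client to start dirty logging for
+   the given region; the first client error aborts the walk and is returned. */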
+int cpu_physical_log_start(target_phys_addr_t start_addr,
+ ram_addr_t size)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+ if (client->log_start) {
+ int r = client->log_start(client, start_addr, size);
+ if (r < 0) {
+ return r;
+ }
+ }
+ }
+ return 0;
+}
+
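+/* Counterpart of cpu_physical_log_start(): ask every client to stop logging
+   for the region, again propagating the first error. */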
+int cpu_physical_log_stop(target_phys_addr_t start_addr,
+ ram_addr_t size)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+ if (client->log_stop) {
+ int r = client->log_stop(client, start_addr, size);
+ if (r < 0) {
+ return r;
+ }
+ }
+ }
+ return 0;
+}
+
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
ram_addr_t ram_addr;
ram_addr_t orig_size = size;
subpage_t *subpage;
+ assert(size);
cpu_notify_set_memory(start_addr, size, phys_offset);
if (phys_offset == IO_MEM_UNASSIGNED) {
region_offset &= TARGET_PAGE_MASK;
size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
end_addr = start_addr + (target_phys_addr_t)size;
- for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
+
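+ /* A do-while loop (rather than the former for loop) still iterates when
+    end_addr wraps back around to start_addr, e.g. for a region covering
+    the entire address space; the assert(size) above rules out size == 0. */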
+ addr = start_addr;
+ do {
p = phys_page_find(addr >> TARGET_PAGE_BITS);
if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
ram_addr_t orig_memory = p->phys_offset;
}
}
region_offset += TARGET_PAGE_SIZE;
- }
+ addr += TARGET_PAGE_SIZE;
+ } while (addr != end_addr);
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
if (host) {
new_block->host = host;
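+ /* The caller allocated this memory; flag the block so qemu_ram_free()
+    and qemu_ram_remap() leave the host mapping alone. */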
+ new_block->flags |= RAM_PREALLOC_MASK;
} else {
if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
QLIST_REMOVE(block, next);
- if (mem_path) {
+ if (block->flags & RAM_PREALLOC_MASK) {
+ ;
+ } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
if (block->fd) {
munmap(block->host, block->length);
} else {
qemu_vfree(block->host);
}
+#else
+ abort();
#endif
} else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
}
+#ifndef _WIN32
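+/* Discard the current host mapping for a range of guest RAM and map fresh
+   pages at the same virtual address, reusing the block's original backing
+   (-mem-path file, anonymous memory, or the s390/KVM shared mapping).
+   Blocks preallocated by the caller are left untouched. */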
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
+{
+ RAMBlock *block;
+ ram_addr_t offset;
+ int flags;
+ void *area, *vaddr;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ offset = addr - block->offset;
+ if (offset < block->length) {
+ vaddr = block->host + offset;
+ if (block->flags & RAM_PREALLOC_MASK) {
+ ;
+ } else {
+ flags = MAP_FIXED;
+ munmap(vaddr, length);
+ if (mem_path) {
+#if defined(__linux__) && !defined(TARGET_S390X)
+ if (block->fd) {
+#ifdef MAP_POPULATE
+ flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
+ MAP_PRIVATE;
+#else
+ flags |= MAP_PRIVATE;
+#endif
+ area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+ flags, block->fd, offset);
+ } else {
+ flags |= MAP_PRIVATE | MAP_ANONYMOUS;
+ area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+ flags, -1, 0);
+ }
+#else
+ abort();
+#endif
+ } else {
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+ flags |= MAP_SHARED | MAP_ANONYMOUS;
+ area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
+ flags, -1, 0);
+#else
+ flags |= MAP_PRIVATE | MAP_ANONYMOUS;
+ area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+ flags, -1, 0);
+#endif
+ }
+ if (area != vaddr) {
+ fprintf(stderr, "Could not remap addr: %lx@%lx\n",
+ length, addr);
+ exit(1);
+ }
+ qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
+ }
+ return;
+ }
+ }
+}
+#endif /* !_WIN32 */
+
/* Return a host pointer to ram allocated with qemu_ram_alloc.
With the exception of the softmmu code in this file, this should
only be used for local memory (e.g. video ram) that the device owns,
QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
- QLIST_REMOVE(block, next);
- QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
+ /* Move this entry to the start of the list. */
+ if (block != QLIST_FIRST(&ram_list.blocks)) {
+ QLIST_REMOVE(block, next);
+ QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
+ }
return block->host + (addr - block->offset);
}
}
cpu_abort(env, "check_watchpoint: could not find TB for "
"pc=%p", (void *)env->mem_io_pc);
}
- cpu_restore_state(tb, env, env->mem_io_pc, NULL);
+ cpu_restore_state(tb, env, env->mem_io_pc);
tb_phys_invalidate(tb, -1);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
env->exception_index = EXCP_DEBUG;
bounce.addr = addr;
bounce.len = l;
if (!is_write) {
- cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
+ cpu_physical_memory_read(addr, bounce.buffer, l);
}
ptr = bounce.buffer;
} else {
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
val = tswap64(val);
- cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
+ cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
retaddr);
}
n = env->icount_decr.u16.low + tb->icount;
- cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
+ cpu_restore_state(tb, env, (unsigned long)retaddr);
/* Calculate how many instructions had been executed before the fault
occurred. */
n = n - env->icount_decr.u16.low;