#endif
/* log support */
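+/* Win32 has no /tmp; fall back to qemu.log in the current directory. */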
+#ifdef WIN32
+static const char *logfilename = "qemu.log";
+#else
static const char *logfilename = "/tmp/qemu.log";
+#endif
FILE *logfile;
int loglevel;
static int log_append = 0;
exit(1);
}
}
-#elif defined(__FreeBSD__) || defined(__DragonFly__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
{
int flags;
void *addr = NULL;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
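+/* Pull the current register state from the accelerator (e.g. KVM) into
+   CPUState so the snapshot code sees up-to-date values. */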
-static void cpu_common_pre_save(const void *opaque)
+static void cpu_common_pre_save(void *opaque)
{
- CPUState *env = (void *)opaque;
+ CPUState *env = opaque;
cpu_synchronize_state(env);
}
static void cpu_unlink_tb(CPUState *env)
{
-#if defined(CONFIG_USE_NPTL)
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
problem and hope the cpu will stop of its own accord. For userspace
emulation this often isn't actually as bad as it sounds. Often
signals are used primarily to interrupt blocking syscalls. */
-#else
TranslationBlock *tb;
static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
tb = env->current_tb;
/* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */
- if (tb && !testandset(&interrupt_lock)) {
+ if (tb) {
+ spin_lock(&interrupt_lock);
env->current_tb = NULL;
tb_reset_jump_recursive(tb);
- resetlock(&interrupt_lock);
+ spin_unlock(&interrupt_lock);
}
-#endif
}
/* mask must never be zero, except for A20 change call */
} \
} while (0)
-/* register physical memory. 'size' must be a multiple of the target
- page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
+/* register physical memory.
+ For RAM, 'size' must be a multiple of the target page size.
+ If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
io memory page. The address used when calling the IO function is
the offset from the start of the region, plus region_offset. Both
start_addr and region_offset are rounded down to a page boundary
size = TARGET_PAGE_ALIGN(size);
new_block = qemu_malloc(sizeof(*new_block));
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+ /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
+ new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+#else
new_block->host = qemu_vmalloc(size);
+#endif
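+    /* Mark the new RAM block mergeable so the host kernel's KSM can
+       coalesce identical guest pages and reduce host memory usage. */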
+#ifdef MADV_MERGEABLE
+ madvise(new_block->host, size, MADV_MERGEABLE);
+#endif
new_block->offset = last_ram_offset;
new_block->length = size;
io_mem_used[i] = 1;
return i;
}
-
+    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
return -1;
}