/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
-#if defined(__arm__) || defined(__sparc_v9__)
+#if defined(__arm__) || defined(__sparc__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
static MemoryRegion io_mem_watch;
#endif
-/* log support */
-#ifdef WIN32
-static const char *logfilename = "qemu.log";
-#else
-static const char *logfilename = "/tmp/qemu.log";
-#endif
-FILE *logfile;
-int loglevel;
-static int log_append = 0;
-
/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;
/* Cannot map more than that */
if (code_gen_buffer_size > (800 * 1024 * 1024))
code_gen_buffer_size = (800 * 1024 * 1024);
-#elif defined(__sparc_v9__)
+#elif defined(__sparc__) && HOST_LONG_BITS == 64
// Map the buffer below 2G, so we can use direct calls and branches
- flags |= MAP_FIXED;
- start = (void *) 0x60000000UL;
+ start = (void *) 0x40000000UL;
if (code_gen_buffer_size > (512 * 1024 * 1024))
code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
/* Cannot map more than that */
if (code_gen_buffer_size > (800 * 1024 * 1024))
code_gen_buffer_size = (800 * 1024 * 1024);
-#elif defined(__sparc_v9__)
+#elif defined(__sparc__) && HOST_LONG_BITS == 64
// Map the buffer below 2G, so we can use direct calls and branches
- flags |= MAP_FIXED;
- addr = (void *) 0x60000000UL;
+ addr = (void *) 0x40000000UL;
if (code_gen_buffer_size > (512 * 1024 * 1024)) {
code_gen_buffer_size = (512 * 1024 * 1024);
}
#endif
}
-/* enable or disable low levels log */
-void cpu_set_log(int log_flags)
-{
- loglevel = log_flags;
- if (loglevel && !logfile) {
- logfile = fopen(logfilename, log_append ? "a" : "w");
- if (!logfile) {
- perror(logfilename);
- _exit(1);
- }
-#if !defined(CONFIG_SOFTMMU)
- /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
- {
- static char logfile_buf[4096];
- setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
- }
-#elif defined(_WIN32)
- /* Win32 doesn't support line-buffering, so use unbuffered output. */
- setvbuf(logfile, NULL, _IONBF, 0);
-#else
- setvbuf(logfile, NULL, _IOLBF, 0);
-#endif
- log_append = 1;
- }
- if (!loglevel && logfile) {
- fclose(logfile);
- logfile = NULL;
- }
-}
-
-void cpu_set_log_filename(const char *filename)
-{
- logfilename = strdup(filename);
- if (logfile) {
- fclose(logfile);
- logfile = NULL;
- }
- cpu_set_log(loglevel);
-}
-
static void cpu_unlink_tb(CPUArchState *env)
{
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
cpu_unlink_tb(env);
}
-const CPULogItem cpu_log_items[] = {
- { CPU_LOG_TB_OUT_ASM, "out_asm",
- "show generated host assembly code for each compiled TB" },
- { CPU_LOG_TB_IN_ASM, "in_asm",
- "show target assembly code for each compiled TB" },
- { CPU_LOG_TB_OP, "op",
- "show micro ops for each compiled TB" },
- { CPU_LOG_TB_OP_OPT, "op_opt",
- "show micro ops "
-#ifdef TARGET_I386
- "before eflags optimization and "
-#endif
- "after liveness analysis" },
- { CPU_LOG_INT, "int",
- "show interrupts/exceptions in short format" },
- { CPU_LOG_EXEC, "exec",
- "show trace before each executed TB (lots of logs)" },
- { CPU_LOG_TB_CPU, "cpu",
- "show CPU state before block translation" },
-#ifdef TARGET_I386
- { CPU_LOG_PCALL, "pcall",
- "show protected mode far calls/returns/exceptions" },
- { CPU_LOG_RESET, "cpu_reset",
- "show CPU state before CPU resets" },
-#endif
-#ifdef DEBUG_IOPORT
- { CPU_LOG_IOPORT, "ioport",
- "show all i/o ports accesses" },
-#endif
- { 0, NULL, NULL },
-};
-
-static int cmp1(const char *s1, int n, const char *s2)
-{
- if (strlen(s2) != n)
- return 0;
- return memcmp(s1, s2, n) == 0;
-}
-
-/* takes a comma separated list of log masks. Return 0 if error. */
-int cpu_str_to_log_mask(const char *str)
-{
- const CPULogItem *item;
- int mask;
- const char *p, *p1;
-
- p = str;
- mask = 0;
- for(;;) {
- p1 = strchr(p, ',');
- if (!p1)
- p1 = p + strlen(p);
- if(cmp1(p,p1-p,"all")) {
- for(item = cpu_log_items; item->mask != 0; item++) {
- mask |= item->mask;
- }
- } else {
- for(item = cpu_log_items; item->mask != 0; item++) {
- if (cmp1(p, p1 - p, item->name))
- goto found;
- }
- return 0;
- }
- found:
- mask |= item->mask;
- if (*p1 != ',')
- break;
- p = p1 + 1;
- }
- return mask;
-}
-
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
va_list ap;
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
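+/* Reset the dirty-tracking entries in every CPU TLB for the host memory
+   backing [start, end); the range must stay within a single RAM block. */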
+static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
+ uintptr_t length)
+{
+ uintptr_t start1;
+
+ /* we modify the TLB cache so that the dirty bit will be set again
+ when accessing the range */
+ start1 = (uintptr_t)qemu_safe_ram_ptr(start);
+ /* Check that we don't span multiple blocks - this breaks the
+ address comparisons below. */
+ if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
+ != (end - 1) - start) {
+ abort();
+ }
+ cpu_tlb_reset_dirty_all(start1, length);
+}
+
/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
int dirty_flags)
{
- uintptr_t length, start1;
+ uintptr_t length;
start &= TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
return;
cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
- /* we modify the TLB cache so that the dirty bit will be set again
- when accessing the range */
- start1 = (uintptr_t)qemu_safe_ram_ptr(start);
- /* Check that we don't span multiple blocks - this breaks the
- address comparisons below. */
- if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
- != (end - 1) - start) {
- abort();
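+ /* only TCG keeps a software TLB whose dirty entries need resetting */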
+ if (tcg_enabled()) {
+ tlb_reset_dirty_range_all(start, end, length);
}
- cpu_tlb_reset_dirty_all(start1, length);
}
int cpu_physical_memory_set_dirty_tracking(int enable)
phys_sections_nb = 0;
}
-/* register physical memory.
- For RAM, 'size' must be a multiple of the target page size.
- If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
- io memory page. The address used when calling the IO function is
- the offset from the start of the region, plus region_offset. Both
- start_addr and region_offset are rounded down to a page boundary
- before calculating this offset. This should not be a problem unless
- the low bits of start_addr and region_offset differ. */
static void register_subpage(MemoryRegionSection *section)
{
subpage_t *subpage;
subpage = container_of(existing->mr, subpage_t, iomem);
}
start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
- end = start + section->size;
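+ /* subpage_register() treats the end address as inclusive */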
+ end = start + section->size - 1;
subpage_register(subpage, start, end, phys_section_add(section));
}
remain.offset_within_address_space += now.size;
remain.offset_within_region += now.size;
}
- now = remain;
- now.size &= TARGET_PAGE_MASK;
- if (now.size) {
- register_multipage(&now);
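+ /* Register the page-sized remainder: if the offset within the region is
+ not page aligned, each page has to go through a subpage; otherwise the
+ remaining whole pages are registered at once as a multipage mapping. */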
+ while (remain.size >= TARGET_PAGE_SIZE) {
+ now = remain;
+ if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
+ now.size = TARGET_PAGE_SIZE;
+ register_subpage(&now);
+ } else {
+ now.size &= TARGET_PAGE_MASK;
+ register_multipage(&now);
+ }
remain.size -= now.size;
remain.offset_within_address_space += now.size;
remain.offset_within_region += now.size;
return last;
}
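+/* Exclude guest RAM from core dumps when the user runs with
+   -machine dump-guest-core=off. */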
+static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
+{
+ int ret;
+ QemuOpts *machine_opts;
+
+    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
+ machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
+ if (machine_opts &&
+ !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
+ ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
+ if (ret) {
+ perror("qemu_madvise");
+ fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
+ "but dump_guest_core=off specified\n");
+ }
+ }
+}
+
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
RAMBlock *new_block, *block;
assert(new_block);
assert(!new_block->idstr[0]);
- if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
- char *id = dev->parent_bus->info->get_dev_path(dev);
+ if (dev) {
+ char *id = qdev_get_dev_path(dev);
if (id) {
snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
g_free(id);
}
}
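+/* Allow the host to merge identical pages (MADV_MERGEABLE, i.e. KSM on
+   Linux) unless the user disabled it with -machine mem-merge=off. */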
+static int memory_try_enable_merging(void *addr, size_t len)
+{
+ QemuOpts *opts;
+
+ opts = qemu_opts_find(qemu_find_opts("machine"), 0);
+ if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
+ /* disabled by the user */
+ return 0;
+ }
+
+ return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
+}
+
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr)
{
new_block->host = file_ram_alloc(new_block, size, mem_path);
if (!new_block->host) {
new_block->host = qemu_vmalloc(size);
- qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
+ memory_try_enable_merging(new_block->host, size);
}
#else
fprintf(stderr, "-mem-path option unsupported\n");
exit(1);
#endif
} else {
-#if defined(TARGET_S390X) && defined(CONFIG_KVM)
- /* S390 KVM requires the topmost vma of the RAM to be smaller than
- an system defined value, which is at least 256GB. Larger systems
- have larger values. We put the guest between the end of data
- segment (system break) and this value. We use 32GB as a base to
- have enough room for the system break to grow. */
- new_block->host = mmap((void*)0x800000000, size,
- PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
- if (new_block->host == MAP_FAILED) {
- fprintf(stderr, "Allocating RAM failed\n");
- abort();
- }
-#else
if (xen_enabled()) {
xen_ram_alloc(new_block->offset, size, mr);
+ } else if (kvm_enabled()) {
+ /* some s390/kvm configurations have special constraints */
+ new_block->host = kvm_vmalloc(size);
} else {
new_block->host = qemu_vmalloc(size);
}
-#endif
- qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
+ memory_try_enable_merging(new_block->host, size);
}
}
new_block->length = size;
ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
last_ram_offset() >> TARGET_PAGE_BITS);
memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
- 0xff, size >> TARGET_PAGE_BITS);
+ 0, size >> TARGET_PAGE_BITS);
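+ /* set the dirty bits through the helper so any related bookkeeping
+ (such as the migration dirty-page count) stays consistent */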
+ cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
+
+ qemu_ram_setup_dump(new_block->host, size);
if (kvm_enabled())
kvm_setup_guest_memory(new_block->host, size);
length, addr);
exit(1);
}
- qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
+ memory_try_enable_merging(vaddr, length);
+ qemu_ram_setup_dump(vaddr, length);
}
return;
}
static void core_eventfd_add(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data, EventNotifier *e)
{
}
static void core_eventfd_del(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data, EventNotifier *e)
{
}
static void io_eventfd_add(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data, EventNotifier *e)
{
}
static void io_eventfd_del(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data, EventNotifier *e)
{
}
/* ROM/RAM case */
ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
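+ /* the write may overwrite translated code, so drop any TBs covering
+ this range before marking it dirty */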
+ if (!cpu_physical_memory_is_dirty(addr1)) {
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
+ /* set dirty bit */
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
+ }
qemu_put_ram_ptr(ptr);
}
len -= l;