#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
+#include <signal.h>
#endif
//#define DEBUG_TB_INVALIDATE
#endif
/* log support */
+#ifdef _WIN32
+static const char *logfilename = "qemu.log";
+#else
static const char *logfilename = "/tmp/qemu.log";
+#endif
FILE *logfile;
int loglevel;
static int log_append = 0;
static void cpu_unlink_tb(CPUState *env)
{
-#if defined(CONFIG_USE_NPTL)
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
problem and hope the cpu will stop of its own accord. For userspace
emulation this often isn't actually as bad as it sounds. Often
signals are used primarily to interrupt blocking syscalls. */
-#else
TranslationBlock *tb;
static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
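+    /* Serialize TB unchaining: two vCPUs must not reset the same
+       jump chain concurrently.  */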
+ spin_lock(&interrupt_lock);
tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
- if (tb && !testandset(&interrupt_lock)) {
+ if (tb) {
env->current_tb = NULL;
tb_reset_jump_recursive(tb);
- resetlock(&interrupt_lock);
}
-#endif
+ spin_unlock(&interrupt_lock);
}
/* mask must never be zero, except for A20 change call */
{ 0, NULL, NULL },
};
+#ifndef CONFIG_USER_ONLY
+static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
+ = QLIST_HEAD_INITIALIZER(memory_client_list);
+
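+/* Tell every registered client that guest physical range
+ * [start_addr, start_addr + size) is now backed by phys_offset.  */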
+static void cpu_notify_set_memory(target_phys_addr_t start_addr,
+ ram_addr_t size,
+ ram_addr_t phys_offset)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+ client->set_memory(client, start_addr, size, phys_offset);
+ }
+}
+
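+/* Have each client sync its dirty bitmap for the given physical
+ * range; the first failure aborts the walk and is returned.  */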
+static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
+ target_phys_addr_t end)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+        int r = client->sync_dirty_bitmap(client, start, end);
+        if (r < 0) {
+            return r;
+        }
+ }
+ return 0;
+}
+
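+/* Propagate a migration-log (dirty logging) state change to every
+ * registered client; the first failure aborts the walk.  */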
+static int cpu_notify_migration_log(int enable)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+        int r = client->migration_log(client, enable);
+        if (r < 0) {
+            return r;
+        }
+ }
+ return 0;
+}
+
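+/* Walk one bottom-level map, handing each assigned page to the
+ * client's set_memory callback.  'start' is the guest physical
+ * address covered by the first entry of this map.  */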
+static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
+                                         CPUPhysMemoryClient *client,
+                                         target_phys_addr_t start)
+{
+ PhysPageDesc *pd;
+ int l1, l2;
+
+ for (l1 = 0; l1 < L1_SIZE; ++l1) {
+ pd = phys_map[l1];
+ if (!pd) {
+ continue;
+ }
+ for (l2 = 0; l2 < L2_SIZE; ++l2) {
+ if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
+ continue;
+ }
+            client->set_memory(client,
+                               start + ((((target_phys_addr_t)l1 << L2_BITS)
+                                         + l2) << TARGET_PAGE_BITS),
+                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
+ }
+ }
+}
+
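+/* Replay the complete current physical mapping into one client, so a
+ * client that registers after startup still sees pre-existing regions.  */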
+static void phys_page_for_each(CPUPhysMemoryClient *client)
+{
+#if TARGET_PHYS_ADDR_SPACE_BITS > 32
+
+#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
+#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
+#endif
+ void **phys_map = (void **)l1_phys_map;
+ int l1;
+ if (!l1_phys_map) {
+ return;
+ }
+    for (l1 = 0; l1 < L1_SIZE; ++l1) {
+        if (phys_map[l1]) {
+            phys_page_for_each_in_l1_map(phys_map[l1], client,
+                (target_phys_addr_t)l1 << (L1_BITS + L2_BITS
+                                           + TARGET_PAGE_BITS));
+        }
+    }
+#else
+ if (!l1_phys_map) {
+ return;
+ }
+    phys_page_for_each_in_l1_map(l1_phys_map, client, 0);
+#endif
+}
+
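+/* Registration immediately replays the existing mapping into the new
+ * client through its set_memory callback.  */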
+void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
+{
+ QLIST_INSERT_HEAD(&memory_client_list, client, list);
+ phys_page_for_each(client);
+}
+
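+/* Unregistering only stops future callbacks; the client keeps any
+ * state it has already built up.  */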
+void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
+{
+ QLIST_REMOVE(client, list);
+}
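+
+/* A minimal client might look like this (a sketch; the my_* callbacks
+ * are hypothetical and must match the CPUPhysMemoryClient signatures
+ * used above):
+ *
+ *     static CPUPhysMemoryClient my_client = {
+ *         .set_memory = my_set_memory,
+ *         .sync_dirty_bitmap = my_sync_dirty_bitmap,
+ *         .migration_log = my_migration_log,
+ *     };
+ *
+ *     cpu_register_phys_memory_client(&my_client);
+ */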
+#endif
+
static int cmp1(const char *s1, int n, const char *s2)
{
if (strlen(s2) != n)
}
va_end(ap2);
va_end(ap);
+#if defined(CONFIG_USER_ONLY)
+ {
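+        /* The guest may have installed its own SIGABRT handler;
+           restore the default action so the abort() below really
+           terminates us.  */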
+ struct sigaction act;
+ sigfillset(&act.sa_mask);
+        act.sa_handler = SIG_DFL;
+        act.sa_flags = 0;
+ sigaction(SIGABRT, &act, NULL);
+ }
+#endif
abort();
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
+ int ret = 0;
in_migration = enable;
if (kvm_enabled()) {
- return kvm_set_migration_log(enable);
+ ret = kvm_set_migration_log(enable);
}
- return 0;
+ if (ret < 0) {
+ return ret;
+ }
+ ret = cpu_notify_migration_log(!!enable);
+ return ret;
}
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret = 0;
- if (kvm_enabled())
+ if (kvm_enabled()) {
ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+ }
+ if (ret < 0) {
+ return ret;
+ }
+ ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
return ret;
}
if (kvm_enabled())
kvm_set_phys_mem(start_addr, size, phys_offset);
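+    /* Keep registered memory clients in sync with the new mapping.  */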
+ cpu_notify_set_memory(start_addr, size, phys_offset);
+
if (phys_offset == IO_MEM_UNASSIGNED) {
region_offset = start_addr;
}
kvm_uncoalesce_mmio_region(addr, size);
}
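+/* Drain MMIO writes batched in KVM's coalesced-MMIO ring so that
+ * device emulation observes them before execution continues.  */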
+void qemu_flush_coalesced_mmio_buffer(void)
+{
+    if (kvm_enabled()) {
+        kvm_flush_coalesced_mmio_buffer();
+    }
+}
+
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
RAMBlock *new_block;
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
-    RAMBlock *prev;
-    RAMBlock **prevp;
RAMBlock *block;
uint8_t *host = ptr;
-    prev = NULL;
-    prevp = &ram_blocks;
block = ram_blocks;
while (block && (block->host > host
|| block->host + block->length <= host)) {
-        if (prev)
-            prevp = &prev->next;
-        prev = block;
block = block->next;
}
io_mem_used[i] = 1;
return i;
}
-
+ fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
return -1;
}
if (is_write) {
cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
}
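+    /* bounce.buffer came from qemu_memalign(), so it must be released
+       with qemu_vfree() rather than qemu_free().  */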
- qemu_free(bounce.buffer);
+ qemu_vfree(bounce.buffer);
bounce.buffer = NULL;
cpu_notify_map_clients();
}