spin_unlock(&interrupt_lock);
}
+#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
-void cpu_interrupt(CPUState *env, int mask)
+static void tcg_handle_interrupt(CPUState *env, int mask)
{
int old_mask;
old_mask = env->interrupt_request;
env->interrupt_request |= mask;
-#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
-#endif
if (use_icount) {
env->icount_decr.u16.high = 0xffff;
-#ifndef CONFIG_USER_ONLY
if (!can_do_io(env)
&& (mask & ~old_mask) != 0) {
cpu_abort(env, "Raised interrupt while not in I/O function");
}
-#endif
} else {
cpu_unlink_tb(env);
}
}
+CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
+
+#else /* CONFIG_USER_ONLY */
+
+void cpu_interrupt(CPUState *env, int mask)
+{
+ env->interrupt_request |= mask;
+ cpu_unlink_tb(env);
+}
+#endif /* CONFIG_USER_ONLY */
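With this change, the system-emulation build no longer defines cpu_interrupt directly in exec.c; the function-pointer indirection lets an accelerator substitute its own handler for the TCG one installed above. A minimal sketch of how the dispatch presumably looks on the header side; the typedef placement, the inline wrapper, and the kvm_handle_interrupt override are illustrative assumptions, not part of this hunk:

    /* Assumed companion declarations; names and placement are illustrative. */
    typedef void (*CPUInterruptHandler)(CPUState *, int);
    extern CPUInterruptHandler cpu_interrupt_handler;

    static inline void cpu_interrupt(CPUState *env, int mask)
    {
        /* Dispatches to tcg_handle_interrupt unless an accelerator
         * installed its own hook. */
        cpu_interrupt_handler(env, mask);
    }

    /* e.g., during accelerator init: */
    /*     cpu_interrupt_handler = kvm_handle_interrupt; */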
+
void cpu_reset_interrupt(CPUState *env, int mask)
{
env->interrupt_request &= ~mask;
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
ram_addr_t size,
- ram_addr_t phys_offset)
+ ram_addr_t phys_offset,
+ bool log_dirty)
{
CPUPhysMemoryClient *client;
QLIST_FOREACH(client, &memory_client_list, list) {
- client->set_memory(client, start_addr, size, phys_offset);
+ client->set_memory(client, start_addr, size, phys_offset, log_dirty);
}
}
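Because the set_memory callback gains a parameter, every registered CPUPhysMemoryClient has to be updated in step. A hedged sketch of an updated client, where only the widened signature is taken from this diff and the name and body are illustrative:

    /* Hypothetical client; only the signature mirrors the patch. */
    static void example_client_set_memory(struct CPUPhysMemoryClient *client,
                                          target_phys_addr_t start_addr,
                                          ram_addr_t size,
                                          ram_addr_t phys_offset,
                                          bool log_dirty)
    {
        if (log_dirty) {
            /* The slot is being re-registered with dirty logging
             * requested; begin tracking writes to this range. */
        }
    }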
return 0;
}
+/* The l1_phys_map provides the upper P_L1_BITS of the guest physical
+ * address. Each intermediate table provides the next L2_BITS of the
+ * guest physical address space. The number of levels varies with host
+ * and guest configuration, so the final guest physical address is
+ * built incrementally: seed it with the L1 offset, then shift and add
+ * in each L2 offset as we recurse through the tables. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
- int level, void **lp)
+ int level, void **lp, target_phys_addr_t addr)
{
    int i;

    if (*lp == NULL) {
        return;
    }
if (level == 0) {
PhysPageDesc *pd = *lp;
+ addr <<= L2_BITS + TARGET_PAGE_BITS;
for (i = 0; i < L2_SIZE; ++i) {
if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
- client->set_memory(client, pd[i].region_offset,
- TARGET_PAGE_SIZE, pd[i].phys_offset);
+ client->set_memory(client, addr | i << TARGET_PAGE_BITS,
+ TARGET_PAGE_SIZE, pd[i].phys_offset, false);
}
}
} else {
void **pp = *lp;
for (i = 0; i < L2_SIZE; ++i) {
- phys_page_for_each_1(client, level - 1, pp + i);
+ phys_page_for_each_1(client, level - 1, pp + i,
+ (addr << L2_BITS) | i);
}
}
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
for (i = 0; i < P_L1_SIZE; ++i) {
phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
- l1_phys_map + 1);
+ l1_phys_map + i, i);
}
}
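To make the shifting concrete, here is a standalone sketch of the address reconstruction that phys_page_for_each_1() performs across the levels; the constant values are illustrative, since the real L2_BITS and TARGET_PAGE_BITS depend on the build configuration:

    #include <stdint.h>

    #define L2_BITS          10   /* illustrative value */
    #define TARGET_PAGE_BITS 12   /* illustrative value */

    /* One intermediate level plus the leaf, mirroring the recursion:
     * each non-leaf level computes (addr << L2_BITS) | index, and
     * level 0 shifts by L2_BITS + TARGET_PAGE_BITS before OR-ing in
     * the page index. */
    static uint64_t rebuild_addr(uint64_t l1_index, uint64_t l2_index,
                                 uint64_t leaf_index)
    {
        uint64_t addr = l1_index;               /* seeded by the caller */
        addr = (addr << L2_BITS) | l2_index;    /* one recursion step   */
        addr <<= L2_BITS + TARGET_PAGE_BITS;    /* entry into level 0   */
        return addr | (leaf_index << TARGET_PAGE_BITS);
    }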
start_addr and region_offset are rounded down to a page boundary
before calculating this offset. This should not be a problem unless
the low bits of start_addr and region_offset differ. */
-void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
+void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
ram_addr_t size,
ram_addr_t phys_offset,
- ram_addr_t region_offset)
+ ram_addr_t region_offset,
+ bool log_dirty)
{
target_phys_addr_t addr, end_addr;
PhysPageDesc *p;
subpage_t *subpage;
assert(size);
- cpu_notify_set_memory(start_addr, size, phys_offset);
+ cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
if (phys_offset == IO_MEM_UNASSIGNED) {
region_offset = start_addr;
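Existing callers with no interest in dirty logging presumably keep working through a compatibility wrapper around the renamed function, along these lines; the wrapper below is an assumption based on the rename, not shown in this diff:

    /* Assumed wrapper preserving the old entry point. */
    static inline void
    cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                        ram_addr_t size,
                                        ram_addr_t phys_offset,
                                        ram_addr_t region_offset)
    {
        cpu_register_physical_memory_log(start_addr, size, phys_offset,
                                         region_offset, false);
    }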