#include "qemu-common.h"
#include "cpu.h"
-#include "exec-all.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
+#include "trace.h"
#endif
//#define DEBUG_TB_INVALIDATE
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
map_exec(code_gen_prologue, sizeof(code_gen_prologue));
- code_gen_buffer_max_size = code_gen_buffer_size -
- (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
+ code_gen_buffer_max_size = code_gen_buffer_size -
+        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
unsigned int n, tb_page_addr_t page_addr)
{
PageDesc *p;
- TranslationBlock *last_first_tb;
+#ifndef CONFIG_USER_ONLY
+ bool page_already_protected;
+#endif
tb->page_addr[n] = page_addr;
p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
tb->page_next[n] = p->first_tb;
- last_first_tb = p->first_tb;
+#ifndef CONFIG_USER_ONLY
+ page_already_protected = p->first_tb != NULL;
+#endif
p->first_tb = (TranslationBlock *)((long)tb | n);
invalidate_page_bitmap(p);
    /* If some code is already present, then the pages are already
       protected, so we only need to handle the case where the first TB
       is allocated in this physical page */
- if (!last_first_tb) {
+ if (!page_already_protected) {
tlb_protect_code(page_addr);
}
#endif
return 0;
}
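+/* Coalescing state for physical memory client notifications: the most
+ * recent run of pages whose guest-physical addresses and phys_offsets are
+ * both contiguous, reported to the client as a single set_memory() call. */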
+struct last_map {
+ target_phys_addr_t start_addr;
+ ram_addr_t size;
+ ram_addr_t phys_offset;
+};
+
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
* address. Each intermediate table provides the next L2_BITs of guest
 * physical address space. The number of levels varies based on host and
* guest configuration, making it efficient to build the final guest
* physical address by seeding the L1 offset and shifting and adding in
* each L2 offset as we recurse through them. */
-static void phys_page_for_each_1(CPUPhysMemoryClient *client,
- int level, void **lp, target_phys_addr_t addr)
+static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
+ void **lp, target_phys_addr_t addr,
+ struct last_map *map)
{
int i;
addr <<= L2_BITS + TARGET_PAGE_BITS;
for (i = 0; i < L2_SIZE; ++i) {
if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
- client->set_memory(client, addr | i << TARGET_PAGE_BITS,
- TARGET_PAGE_SIZE, pd[i].phys_offset, false);
+ target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
+
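+            /* Grow the pending run if this page is contiguous with it in
+             * both guest-physical address and phys_offset; otherwise flush
+             * the run to the client and start a new one from this page. */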
+ if (map->size &&
+ start_addr == map->start_addr + map->size &&
+ pd[i].phys_offset == map->phys_offset + map->size) {
+
+ map->size += TARGET_PAGE_SIZE;
+ continue;
+ } else if (map->size) {
+ client->set_memory(client, map->start_addr,
+ map->size, map->phys_offset, false);
+ }
+
+ map->start_addr = start_addr;
+ map->size = TARGET_PAGE_SIZE;
+ map->phys_offset = pd[i].phys_offset;
}
}
} else {
void **pp = *lp;
for (i = 0; i < L2_SIZE; ++i) {
phys_page_for_each_1(client, level - 1, pp + i,
- (addr << L2_BITS) | i);
+ (addr << L2_BITS) | i, map);
}
}
}
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
int i;
+ struct last_map map = { };
+
for (i = 0; i < P_L1_SIZE; ++i) {
phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
- l1_phys_map + i, i);
+ l1_phys_map + i, i, &map);
+ }
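+    /* Flush any run still pending after the walk. */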
+ if (map.size) {
+ client->set_memory(client, map.start_addr, map.size, map.phys_offset,
+ false);
}
}
abort();
}
#else
- if (xen_mapcache_enabled()) {
+ if (xen_enabled()) {
xen_ram_alloc(new_block->offset, size);
} else {
new_block->host = qemu_vmalloc(size);
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
munmap(block->host, block->length);
#else
- if (xen_mapcache_enabled()) {
- qemu_invalidate_entry(block->host);
+ if (xen_enabled()) {
+ xen_invalidate_map_cache_entry(block->host);
} else {
qemu_vfree(block->host);
}
QLIST_REMOVE(block, next);
QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
}
- if (xen_mapcache_enabled()) {
+ if (xen_enabled()) {
/* We need to check if the requested address is in the RAM
* because we don't want to map the entire memory in QEMU.
+ * In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return qemu_map_cache(addr, 0, 1);
+ return xen_map_cache(addr, 0, 0);
} else if (block->host == NULL) {
- block->host = xen_map_block(block->offset, block->length);
+ block->host =
+ xen_map_cache(block->offset, block->length, 1);
}
}
return block->host + (addr - block->offset);
QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
- if (xen_mapcache_enabled()) {
+ if (xen_enabled()) {
/* We need to check if the requested address is in the RAM
* because we don't want to map the entire memory in QEMU.
+ * In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return qemu_map_cache(addr, 0, 1);
+ return xen_map_cache(addr, 0, 0);
} else if (block->host == NULL) {
- block->host = xen_map_block(block->offset, block->length);
+ block->host =
+ xen_map_cache(block->offset, block->length, 1);
}
}
return block->host + (addr - block->offset);
return NULL;
}
-void qemu_put_ram_ptr(void *addr)
+/* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr,
+ * but takes a size argument */
+void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
- trace_qemu_put_ram_ptr(addr);
-
- if (xen_mapcache_enabled()) {
+ if (*size == 0) {
+ return NULL;
+ }
+ if (xen_enabled()) {
+ return xen_map_cache(addr, *size, 1);
+ } else {
RAMBlock *block;
QLIST_FOREACH(block, &ram_list.blocks, next) {
- if (addr == block->host) {
- break;
+ if (addr - block->offset < block->length) {
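+            /* Clamp *size so the mapping does not run past the end of
+             * this RAM block. */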
+            if (addr - block->offset + *size > block->length) {
+                *size = block->length - addr + block->offset;
+            }
+ return block->host + (addr - block->offset);
}
}
- if (block && block->host) {
- xen_unmap_block(block->host, block->length);
- block->host = NULL;
- } else {
- qemu_map_cache_unlock(addr);
- }
+
+ fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+ abort();
}
}
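+
+/* qemu_put_ram_ptr is now only a tracepoint: Xen map-cache entries are
+ * invalidated explicitly via xen_invalidate_map_cache_entry() instead. */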
+void qemu_put_ram_ptr(void *addr)
+{
+ trace_qemu_put_ram_ptr(addr);
+}
+
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
RAMBlock *block;
uint8_t *host = ptr;
+ if (xen_enabled()) {
+ *ram_addr = xen_ram_addr_from_mapcache(ptr);
+ return 0;
+ }
+
QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
if (block->host == NULL) {
}
}
- if (xen_mapcache_enabled()) {
- *ram_addr = qemu_ram_addr_from_mapcache(ptr);
- return 0;
- }
-
return -1;
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 0, 0, 0, 1);
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+ cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
return 0;
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 0, 0, 0, 2);
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+ cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
return 0;
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 0, 0, 0, 4);
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+ cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
return 0;
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 1, 0, 0, 1);
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+ cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 1, 0, 0, 2);
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+ cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
-#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 1, 0, 0, 4);
+#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
+ cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}
int is_write)
{
target_phys_addr_t len = *plen;
- target_phys_addr_t done = 0;
+ target_phys_addr_t todo = 0;
int l;
- uint8_t *ret = NULL;
- uint8_t *ptr;
target_phys_addr_t page;
unsigned long pd;
PhysPageDesc *p;
- unsigned long addr1;
+ ram_addr_t raddr = ULONG_MAX;
+ ram_addr_t rlen;
+ void *ret;
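+
+    /* Scan the range page by page: MMIO falls back to the bounce buffer,
+     * RAM pages are only counted; the host pointer is produced at the end
+     * by qemu_ram_ptr_length(), which also clamps the length to the RAM
+     * block containing it. */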
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
}
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
- if (done || bounce.buffer) {
+ if (todo || bounce.buffer) {
break;
}
bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
if (!is_write) {
cpu_physical_memory_read(addr, bounce.buffer, l);
}
- ptr = bounce.buffer;
- } else {
- addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
- ptr = qemu_get_ram_ptr(addr1);
+
+ *plen = l;
+ return bounce.buffer;
}
- if (!done) {
- ret = ptr;
- } else if (ret + done != ptr) {
- break;
+ if (!todo) {
+ raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
}
len -= l;
addr += l;
- done += l;
+ todo += l;
}
- *plen = done;
+ rlen = todo;
+ ret = qemu_ram_ptr_length(raddr, &rlen);
+ *plen = rlen;
return ret;
}
access_len -= l;
}
}
- if (xen_mapcache_enabled()) {
- uint8_t *buffer1 = buffer;
- uint8_t *end_buffer = buffer + len;
-
- while (buffer1 < end_buffer) {
- qemu_put_ram_ptr(buffer1);
- buffer1 += TARGET_PAGE_SIZE;
- }
+ if (xen_enabled()) {
+ xen_invalidate_map_cache_entry(buffer);
}
return;
}
}
/* warning: addr must be aligned */
-uint32_t ldl_phys(target_phys_addr_t addr)
+static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
+ enum device_endian endian)
{
int io_index;
uint8_t *ptr;
if (p)
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
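+        /* The MMIO read handler returns the value in the guest's native
+           byte order; swap it when the caller asked for the opposite
+           endianness. */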
+#if defined(TARGET_WORDS_BIGENDIAN)
+ if (endian == DEVICE_LITTLE_ENDIAN) {
+ val = bswap32(val);
+ }
+#else
+ if (endian == DEVICE_BIG_ENDIAN) {
+ val = bswap32(val);
+ }
+#endif
} else {
/* RAM case */
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
(addr & ~TARGET_PAGE_MASK);
- val = ldl_p(ptr);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ val = ldl_le_p(ptr);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ val = ldl_be_p(ptr);
+ break;
+ default:
+ val = ldl_p(ptr);
+ break;
+ }
}
return val;
}
+uint32_t ldl_phys(target_phys_addr_t addr)
+{
+ return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
+}
+
+uint32_t ldl_le_phys(target_phys_addr_t addr)
+{
+ return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
+}
+
+uint32_t ldl_be_phys(target_phys_addr_t addr)
+{
+ return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
+}
+
/* warning: addr must be aligned */
-uint64_t ldq_phys(target_phys_addr_t addr)
+static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
+ enum device_endian endian)
{
int io_index;
uint8_t *ptr;
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
if (p)
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+
+ /* XXX This is broken when device endian != cpu endian.
+ Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
/* RAM case */
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
(addr & ~TARGET_PAGE_MASK);
- val = ldq_p(ptr);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ val = ldq_le_p(ptr);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ val = ldq_be_p(ptr);
+ break;
+ default:
+ val = ldq_p(ptr);
+ break;
+ }
}
return val;
}
+uint64_t ldq_phys(target_phys_addr_t addr)
+{
+ return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
+}
+
+uint64_t ldq_le_phys(target_phys_addr_t addr)
+{
+ return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
+}
+
+uint64_t ldq_be_phys(target_phys_addr_t addr)
+{
+ return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
+}
+
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
}
/* warning: addr must be aligned */
-uint32_t lduw_phys(target_phys_addr_t addr)
+static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
+ enum device_endian endian)
{
int io_index;
uint8_t *ptr;
if (p)
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+#if defined(TARGET_WORDS_BIGENDIAN)
+ if (endian == DEVICE_LITTLE_ENDIAN) {
+ val = bswap16(val);
+ }
+#else
+ if (endian == DEVICE_BIG_ENDIAN) {
+ val = bswap16(val);
+ }
+#endif
} else {
/* RAM case */
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
(addr & ~TARGET_PAGE_MASK);
- val = lduw_p(ptr);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ val = lduw_le_p(ptr);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ val = lduw_be_p(ptr);
+ break;
+ default:
+ val = lduw_p(ptr);
+ break;
+ }
}
return val;
}
+uint32_t lduw_phys(target_phys_addr_t addr)
+{
+ return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
+}
+
+uint32_t lduw_le_phys(target_phys_addr_t addr)
+{
+ return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
+}
+
+uint32_t lduw_be_phys(target_phys_addr_t addr)
+{
+ return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
+}
+
/* warning: addr must be aligned. The ram page is not marked as dirty
and the code inside is not invalidated. It is useful if the dirty
bits are used to track modified PTEs */
}
/* warning: addr must be aligned */
-void stl_phys(target_phys_addr_t addr, uint32_t val)
+static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
+ enum device_endian endian)
{
int io_index;
uint8_t *ptr;
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
if (p)
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
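+        /* The value arrives in the guest's native byte order; swap it first
+           when the requested device endianness differs, then hand it to the
+           MMIO write handler. */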
+#if defined(TARGET_WORDS_BIGENDIAN)
+ if (endian == DEVICE_LITTLE_ENDIAN) {
+ val = bswap32(val);
+ }
+#else
+ if (endian == DEVICE_BIG_ENDIAN) {
+ val = bswap32(val);
+ }
+#endif
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
} else {
unsigned long addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
- stl_p(ptr, val);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ stl_le_p(ptr, val);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ stl_be_p(ptr, val);
+ break;
+ default:
+ stl_p(ptr, val);
+ break;
+ }
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
}
}
+void stl_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
+}
+
+void stl_le_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
+}
+
+void stl_be_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
+}
+
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
}
/* warning: addr must be aligned */
-void stw_phys(target_phys_addr_t addr, uint32_t val)
+static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
+ enum device_endian endian)
{
int io_index;
uint8_t *ptr;
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
if (p)
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+#if defined(TARGET_WORDS_BIGENDIAN)
+ if (endian == DEVICE_LITTLE_ENDIAN) {
+ val = bswap16(val);
+ }
+#else
+ if (endian == DEVICE_BIG_ENDIAN) {
+ val = bswap16(val);
+ }
+#endif
io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
} else {
unsigned long addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
- stw_p(ptr, val);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ stw_le_p(ptr, val);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ stw_be_p(ptr, val);
+ break;
+ default:
+ stw_p(ptr, val);
+ break;
+ }
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
}
}
+void stw_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
+}
+
+void stw_le_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
+}
+
+void stw_be_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
+}
+
/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
cpu_physical_memory_write(addr, &val, 8);
}
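+
+/* The 64-bit little/big-endian variants convert the value and store it via
+ * cpu_physical_memory_write; unlike the 16/32-bit helpers there is no
+ * aligned fast path here. */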
+void stq_le_phys(target_phys_addr_t addr, uint64_t val)
+{
+ val = cpu_to_le64(val);
+ cpu_physical_memory_write(addr, &val, 8);
+}
+
+void stq_be_phys(target_phys_addr_t addr, uint64_t val)
+{
+ val = cpu_to_be64(val);
+ cpu_physical_memory_write(addr, &val, 8);
+}
+
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
uint8_t *buf, int len, int is_write)