#include "qemu-common.h"
#include "cpu.h"
-#include "exec-all.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
+#include "memory.h"
+#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
int phys_ram_fd;
static int in_migration;
-RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
+RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
+
+static MemoryRegion *system_memory;
+static MemoryRegion *system_io;
+
#endif
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
-CPUState *cpu_single_env;
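+/* cpu_single_env is thread-local: every thread running cpu_exec() sees its
+   own copy. */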
+DEFINE_TLS(CPUState *, cpu_single_env);
/* 0 = Do not count executed instructions.
1 = Precise instruction counting.
2 = Adaptive rate instruction counting. */
int use_icount = 0;
-/* Current instruction counter. While executing translated code this may
- include some instructions that have not yet been executed. */
-int64_t qemu_icount;
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
unsigned long qemu_real_host_page_size;
-unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);
+static void memory_map_init(void);
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
qemu_host_page_size = qemu_real_host_page_size;
if (qemu_host_page_size < TARGET_PAGE_SIZE)
qemu_host_page_size = TARGET_PAGE_SIZE;
- qemu_host_page_bits = 0;
- while ((1 << qemu_host_page_bits) < qemu_host_page_size)
- qemu_host_page_bits++;
qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
int i;
#if defined(CONFIG_USER_ONLY)
- /* We can't use qemu_malloc because it may recurse into a locked mutex. */
+ /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE) \
do { \
P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
} while (0)
#else
# define ALLOC(P, SIZE) \
- do { P = qemu_mallocz(SIZE); } while (0)
+ do { P = g_malloc0(SIZE); } while (0)
#endif
/* Level 1. Always allocated. */
if (!alloc) {
return NULL;
}
- *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
+ *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
}
lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
}
return NULL;
}
- *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
+ *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
for (i = 0; i < L2_SIZE; i++) {
pd[i].phys_offset = IO_MEM_UNASSIGNED;
code_gen_buffer_size = tb_size;
if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
- /* in user mode, phys_ram_size is not meaningful */
code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
/* XXX: needs adjustments */
}
}
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
- || defined(__DragonFly__) || defined(__OpenBSD__)
+ || defined(__DragonFly__) || defined(__OpenBSD__) \
+ || defined(__NetBSD__)
{
int flags;
void *addr = NULL;
}
}
#else
- code_gen_buffer = qemu_malloc(code_gen_buffer_size);
+ code_gen_buffer = g_malloc(code_gen_buffer_size);
map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
map_exec(code_gen_prologue, sizeof(code_gen_prologue));
- code_gen_buffer_max_size = code_gen_buffer_size -
- (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
+ code_gen_buffer_max_size = code_gen_buffer_size -
+ (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
- tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
+ tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
(in bytes) allocated to the translation buffer. Zero means default
size. */
-void cpu_exec_init_all(unsigned long tb_size)
+void tcg_exec_init(unsigned long tb_size)
{
cpu_gen_init();
code_gen_alloc(tb_size);
code_gen_ptr = code_gen_buffer;
page_init();
-#if !defined(CONFIG_USER_ONLY)
- io_mem_init();
-#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
/* There's no guest base to take into account, so go ahead and
initialize the prologue now. */
#endif
}
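+/* TCG counts as enabled once tcg_exec_init() has allocated the code
+   generation buffer; configurations using a hardware accelerator are
+   expected never to allocate it. */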
+bool tcg_enabled(void)
+{
+ return code_gen_buffer != NULL;
+}
+
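+/* With TCG setup moved into tcg_exec_init(), this only brings up the memory
+   subsystem (root memory regions and io-mem tables) for system emulation. */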
+void cpu_exec_init_all(void)
+{
+#if !defined(CONFIG_USER_ONLY)
+ memory_map_init();
+ io_mem_init();
+#endif
+}
+
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
static int cpu_common_post_load(void *opaque, int version_id)
static inline void invalidate_page_bitmap(PageDesc *p)
{
if (p->code_bitmap) {
- qemu_free(p->code_bitmap);
+ g_free(p->code_bitmap);
p->code_bitmap = NULL;
}
p->code_write_count = 0;
int n, tb_start, tb_end;
TranslationBlock *tb;
- p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
+ p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
tb = p->first_tb;
while (tb != NULL) {
unsigned int n, tb_page_addr_t page_addr)
{
PageDesc *p;
- TranslationBlock *last_first_tb;
+#ifndef CONFIG_USER_ONLY
+ bool page_already_protected;
+#endif
tb->page_addr[n] = page_addr;
p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
tb->page_next[n] = p->first_tb;
- last_first_tb = p->first_tb;
+#ifndef CONFIG_USER_ONLY
+ page_already_protected = p->first_tb != NULL;
+#endif
p->first_tb = (TranslationBlock *)((long)tb | n);
invalidate_page_bitmap(p);
/* if some code is already present, then the pages are already
protected. So we handle the case where only the first TB is
allocated in a physical page */
- if (!last_first_tb) {
+ if (!page_already_protected) {
tlb_protect_code(page_addr);
}
#endif
TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
return -EINVAL;
}
- wp = qemu_malloc(sizeof(*wp));
+ wp = g_malloc(sizeof(*wp));
wp->vaddr = addr;
wp->len_mask = len_mask;
tlb_flush_page(env, watchpoint->vaddr);
- qemu_free(watchpoint);
+ g_free(watchpoint);
}
/* Remove all matching watchpoints. */
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
- bp = qemu_malloc(sizeof(*bp));
+ bp = g_malloc(sizeof(*bp));
bp->pc = pc;
bp->flags = flags;
breakpoint_invalidate(env, breakpoint->pc);
- qemu_free(breakpoint);
+ g_free(breakpoint);
#endif
}
static ram_addr_t find_ram_offset(ram_addr_t size)
{
RAMBlock *block, *next_block;
- ram_addr_t offset = 0, mingap = ULONG_MAX;
+ ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
if (QLIST_EMPTY(&ram_list.blocks))
return 0;
QLIST_FOREACH(block, &ram_list.blocks, next) {
- ram_addr_t end, next = ULONG_MAX;
+ ram_addr_t end, next = RAM_ADDR_MAX;
end = block->offset + block->length;
}
}
if (next - end >= size && next - end < mingap) {
- offset = end;
+ offset = end;
mingap = next - end;
}
}
+
+ if (offset == RAM_ADDR_MAX) {
+ fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
+ (uint64_t)size);
+ abort();
+ }
+
return offset;
}
RAMBlock *new_block, *block;
size = TARGET_PAGE_ALIGN(size);
- new_block = qemu_mallocz(sizeof(*new_block));
+ new_block = g_malloc0(sizeof(*new_block));
if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
char *id = dev->parent_bus->info->get_dev_path(dev);
if (id) {
snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
- qemu_free(id);
+ g_free(id);
}
}
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
abort();
}
#else
- if (xen_mapcache_enabled()) {
+ if (xen_enabled()) {
xen_ram_alloc(new_block->offset, size);
} else {
new_block->host = qemu_vmalloc(size);
QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
- ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
+ ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
last_ram_offset() >> TARGET_PAGE_BITS);
memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
0xff, size >> TARGET_PAGE_BITS);
QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
QLIST_REMOVE(block, next);
- qemu_free(block);
+ g_free(block);
return;
}
}
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
munmap(block->host, block->length);
#else
- if (xen_mapcache_enabled()) {
- qemu_invalidate_entry(block->host);
+ if (xen_enabled()) {
+ xen_invalidate_map_cache_entry(block->host);
} else {
qemu_vfree(block->host);
}
#endif
}
- qemu_free(block);
+ g_free(block);
return;
}
}
#endif
}
if (area != vaddr) {
- fprintf(stderr, "Could not remap addr: %lx@%lx\n",
+ fprintf(stderr, "Could not remap addr: "
+ RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
length, addr);
exit(1);
}
QLIST_REMOVE(block, next);
QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
}
- if (xen_mapcache_enabled()) {
+ if (xen_enabled()) {
/* We need to check if the requested address is in the RAM
* because we don't want to map the entire memory in QEMU.
* In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return qemu_map_cache(addr, 0, 0);
+ return xen_map_cache(addr, 0, 0);
} else if (block->host == NULL) {
- block->host = qemu_map_cache(block->offset, block->length, 1);
+ block->host =
+ xen_map_cache(block->offset, block->length, 1);
}
}
return block->host + (addr - block->offset);
QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->length) {
- if (xen_mapcache_enabled()) {
+ if (xen_enabled()) {
/* We need to check if the requested address is in the RAM
* because we don't want to map the entire memory in QEMU.
* In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return qemu_map_cache(addr, 0, 0);
+ return xen_map_cache(addr, 0, 0);
} else if (block->host == NULL) {
- block->host = qemu_map_cache(block->offset, block->length, 1);
+ block->host =
+ xen_map_cache(block->offset, block->length, 1);
}
}
return block->host + (addr - block->offset);
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
* but takes a size argument */
-void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size)
+void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
- if (xen_mapcache_enabled())
- return qemu_map_cache(addr, *size, 1);
- else {
+ if (*size == 0) {
+ return NULL;
+ }
+ if (xen_enabled()) {
+ return xen_map_cache(addr, *size, 1);
+ } else {
RAMBlock *block;
QLIST_FOREACH(block, &ram_list.blocks, next) {
fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
abort();
-
- *size = 0;
- return NULL;
}
}
RAMBlock *block;
uint8_t *host = ptr;
- if (xen_mapcache_enabled()) {
- *ram_addr = qemu_ram_addr_from_mapcache(ptr);
+ if (xen_enabled()) {
+ *ram_addr = xen_ram_addr_from_mapcache(ptr);
return 0;
}
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 0, 0, 0, 1);
+ cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
return 0;
}
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 0, 0, 0, 2);
+ cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
return 0;
}
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 0, 0, 0, 4);
+ cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
return 0;
}
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 1, 0, 0, 1);
+ cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 1, 0, 0, 2);
+ cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}
printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- do_unassigned_access(addr, 1, 0, 0, 4);
+ cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}
subpage_t *mmio;
int subpage_memory;
- mmio = qemu_mallocz(sizeof(subpage_t));
+ mmio = g_malloc0(sizeof(subpage_t));
mmio->base = base;
subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
static void swapendian_init(int io_index)
{
- SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
+ SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
int i;
/* Swap mmio for big endian targets */
static void swapendian_del(int io_index)
{
if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
- qemu_free(io_mem_opaque[io_index]);
+ g_free(io_mem_opaque[io_index]);
}
}
DEVICE_NATIVE_ENDIAN);
}
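+/* Create the root "system" memory region and the 64 KiB "io" region, and
+   register them with the memory core as the global memory and I/O maps. */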
+static void memory_map_init(void)
+{
+ system_memory = g_malloc(sizeof(*system_memory));
+ memory_region_init(system_memory, "system", INT64_MAX);
+ set_system_memory_map(system_memory);
+
+ system_io = g_malloc(sizeof(*system_io));
+ memory_region_init(system_io, "io", 65536);
+ set_system_io_map(system_io);
+}
+
+MemoryRegion *get_system_memory(void)
+{
+ return system_memory;
+}
+
+MemoryRegion *get_system_io(void)
+{
+ return system_io;
+}
+
#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
uint8_t *ptr;
uint32_t val;
target_phys_addr_t page;
- unsigned long pd;
+ ram_addr_t pd;
PhysPageDesc *p;
while (len > 0) {
l = 1;
}
} else {
- unsigned long addr1;
+ ram_addr_t addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
- MapClient *client = qemu_malloc(sizeof(*client));
+ MapClient *client = g_malloc(sizeof(*client));
client->opaque = opaque;
client->callback = callback;
MapClient *client = (MapClient *)_client;
QLIST_REMOVE(client, link);
- qemu_free(client);
+ g_free(client);
}
static void cpu_notify_map_clients(void)
target_phys_addr_t page;
unsigned long pd;
PhysPageDesc *p;
- target_phys_addr_t addr1 = addr;
+ ram_addr_t raddr = RAM_ADDR_MAX;
+ ram_addr_t rlen;
+ void *ret;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
*plen = l;
return bounce.buffer;
}
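+        /* Remember the guest RAM address of the first chunk; the final host
+           pointer is resolved from it once the full length is known. */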
+ if (!todo) {
+ raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ }
len -= l;
addr += l;
todo += l;
}
- *plen = todo;
- return qemu_ram_ptr_length(addr1, plen);
+ rlen = todo;
+ ret = qemu_ram_ptr_length(raddr, &rlen);
+ *plen = rlen;
+ return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
access_len -= l;
}
}
- if (xen_mapcache_enabled()) {
- qemu_invalidate_entry(buffer);
+ if (xen_enabled()) {
+ xen_invalidate_map_cache_entry(buffer);
}
return;
}
}
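+/* The ld*_phys/st*_phys helpers below take an explicit device_endian
+   argument; thin wrappers expose the native, little-endian and big-endian
+   variants. */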
/* warning: addr must be aligned */
-uint32_t ldl_phys(target_phys_addr_t addr)
+static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
+ enum device_endian endian)
{
int io_index;
uint8_t *ptr;
if (p)
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
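+        /* io_mem handlers return data in target-native order; swap it when
+           the caller asked for the opposite endianness. */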
+#if defined(TARGET_WORDS_BIGENDIAN)
+ if (endian == DEVICE_LITTLE_ENDIAN) {
+ val = bswap32(val);
+ }
+#else
+ if (endian == DEVICE_BIG_ENDIAN) {
+ val = bswap32(val);
+ }
+#endif
} else {
/* RAM case */
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
(addr & ~TARGET_PAGE_MASK);
- val = ldl_p(ptr);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ val = ldl_le_p(ptr);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ val = ldl_be_p(ptr);
+ break;
+ default:
+ val = ldl_p(ptr);
+ break;
+ }
}
return val;
}
+uint32_t ldl_phys(target_phys_addr_t addr)
+{
+ return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
+}
+
+uint32_t ldl_le_phys(target_phys_addr_t addr)
+{
+ return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
+}
+
+uint32_t ldl_be_phys(target_phys_addr_t addr)
+{
+ return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
+}
+
/* warning: addr must be aligned */
-uint64_t ldq_phys(target_phys_addr_t addr)
+static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
+ enum device_endian endian)
{
int io_index;
uint8_t *ptr;
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
if (p)
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+
+ /* XXX This is broken when device endian != cpu endian.
+ Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
/* RAM case */
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
(addr & ~TARGET_PAGE_MASK);
- val = ldq_p(ptr);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ val = ldq_le_p(ptr);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ val = ldq_be_p(ptr);
+ break;
+ default:
+ val = ldq_p(ptr);
+ break;
+ }
}
return val;
}
+uint64_t ldq_phys(target_phys_addr_t addr)
+{
+ return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
+}
+
+uint64_t ldq_le_phys(target_phys_addr_t addr)
+{
+ return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
+}
+
+uint64_t ldq_be_phys(target_phys_addr_t addr)
+{
+ return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
+}
+
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
}
/* warning: addr must be aligned */
-uint32_t lduw_phys(target_phys_addr_t addr)
+static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
+ enum device_endian endian)
{
int io_index;
uint8_t *ptr;
if (p)
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+#if defined(TARGET_WORDS_BIGENDIAN)
+ if (endian == DEVICE_LITTLE_ENDIAN) {
+ val = bswap16(val);
+ }
+#else
+ if (endian == DEVICE_BIG_ENDIAN) {
+ val = bswap16(val);
+ }
+#endif
} else {
/* RAM case */
ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
(addr & ~TARGET_PAGE_MASK);
- val = lduw_p(ptr);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ val = lduw_le_p(ptr);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ val = lduw_be_p(ptr);
+ break;
+ default:
+ val = lduw_p(ptr);
+ break;
+ }
}
return val;
}
+uint32_t lduw_phys(target_phys_addr_t addr)
+{
+ return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
+}
+
+uint32_t lduw_le_phys(target_phys_addr_t addr)
+{
+ return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
+}
+
+uint32_t lduw_be_phys(target_phys_addr_t addr)
+{
+ return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
+}
+
/* warning: addr must be aligned. The ram page is not masked as dirty
and the code inside is not invalidated. It is useful if the dirty
bits are used to track modified PTEs */
}
/* warning: addr must be aligned */
-void stl_phys(target_phys_addr_t addr, uint32_t val)
+static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
+ enum device_endian endian)
{
int io_index;
uint8_t *ptr;
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
if (p)
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+#if defined(TARGET_WORDS_BIGENDIAN)
+ if (endian == DEVICE_LITTLE_ENDIAN) {
+ val = bswap32(val);
+ }
+#else
+ if (endian == DEVICE_BIG_ENDIAN) {
+ val = bswap32(val);
+ }
+#endif
io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
} else {
unsigned long addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
- stl_p(ptr, val);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ stl_le_p(ptr, val);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ stl_be_p(ptr, val);
+ break;
+ default:
+ stl_p(ptr, val);
+ break;
+ }
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
}
}
+void stl_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
+}
+
+void stl_le_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
+}
+
+void stl_be_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
+}
+
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
}
/* warning: addr must be aligned */
-void stw_phys(target_phys_addr_t addr, uint32_t val)
+static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
+ enum device_endian endian)
{
int io_index;
uint8_t *ptr;
io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
if (p)
addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+#if defined(TARGET_WORDS_BIGENDIAN)
+ if (endian == DEVICE_LITTLE_ENDIAN) {
+ val = bswap16(val);
+ }
+#else
+ if (endian == DEVICE_BIG_ENDIAN) {
+ val = bswap16(val);
+ }
+#endif
io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
} else {
unsigned long addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
- stw_p(ptr, val);
+ switch (endian) {
+ case DEVICE_LITTLE_ENDIAN:
+ stw_le_p(ptr, val);
+ break;
+ case DEVICE_BIG_ENDIAN:
+ stw_be_p(ptr, val);
+ break;
+ default:
+ stw_p(ptr, val);
+ break;
+ }
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
}
}
+void stw_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
+}
+
+void stw_le_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
+}
+
+void stw_be_phys(target_phys_addr_t addr, uint32_t val)
+{
+ stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
+}
+
/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
cpu_physical_memory_write(addr, &val, 8);
}
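+/* 64-bit physical stores byte-swap the value as needed and reuse
+   cpu_physical_memory_write(). */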
+void stq_le_phys(target_phys_addr_t addr, uint64_t val)
+{
+ val = cpu_to_le64(val);
+ cpu_physical_memory_write(addr, &val, 8);
+}
+
+void stq_be_phys(target_phys_addr_t addr, uint64_t val)
+{
+ val = cpu_to_be64(val);
+ cpu_physical_memory_write(addr, &val, 8);
+}
+
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
uint8_t *buf, int len, int is_write)
}
#define MMUSUFFIX _cmmu
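+/* GETPC may already be defined at this point, so drop it before forcing it
+   to NULL for the code-access (_cmmu) softmmu templates. */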
+#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS