#include "trace.h"
#endif
+#define WANT_EXEC_OBSOLETE
+#include "exec-obsolete.h"
+
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
static MemoryRegion *system_memory;
static MemoryRegion *system_io;
+MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
+static MemoryRegion io_mem_subpage_ram;
+
#endif
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
-CPUState *cpu_single_env;
+DEFINE_TLS(CPUState *, cpu_single_env);
/* 0 = Do not count executed instructions.
1 = Precise instruction counting.
2 = Adaptive rate instruction counting. */
int use_icount = 0;
-/* Current instruction counter. While executing translated code this may
- include some instructions that have not yet been executed. */
-int64_t qemu_icount;
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)
+#define P_L2_LEVELS \
+ (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
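+/* Example (illustrative only): a target with 36 physical address bits
+   and 12-bit pages gives P_L2_LEVELS = ((36 - 12 - 1) / 10) + 1 = 3,
+   i.e. a three-level radix tree with L2_SIZE == 1024 entries per node. */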
+
/* The bits remaining after N lower levels of page tables. */
-#define P_L1_BITS_REM \
- ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
-/* Size of the L1 page table. Avoid silly small sizes. */
-#if P_L1_BITS_REM < 4
-#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
-#else
-#define P_L1_BITS P_L1_BITS_REM
-#endif
-
#if V_L1_BITS_REM < 4
#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS V_L1_BITS_REM
#endif
-#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
-#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
unsigned long qemu_real_host_page_size;
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
-typedef struct PhysPageDesc {
- /* offset in host memory of the page + io_index in the low bits */
- ram_addr_t phys_offset;
- ram_addr_t region_offset;
-} PhysPageDesc;
+typedef struct PhysPageEntry PhysPageEntry;
+
+static MemoryRegionSection *phys_sections;
+static unsigned phys_sections_nb, phys_sections_nb_alloc;
+static uint16_t phys_section_unassigned;
+
+struct PhysPageEntry {
+ union {
+ uint16_t leaf; /* index into phys_sections */
+ uint16_t node; /* index into phys_map_nodes */
+ } u;
+};
+
+/* Simple allocator for PhysPageEntry nodes */
+static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
+static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
+
+#define PHYS_MAP_NODE_NIL ((uint16_t)~0)
/* This is a multi-level map on the physical address space.
- The bottom level has pointers to PhysPageDesc. */
-static void *l1_phys_map[P_L1_SIZE];
+ The bottom level has pointers to MemoryRegionSections. */
+static PhysPageEntry phys_map = { .u.node = PHYS_MAP_NODE_NIL };
static void io_mem_init(void);
static void memory_map_init(void);
/* io memory support */
-CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
-CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
-void *io_mem_opaque[IO_MEM_NB_ENTRIES];
+MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
-static int io_mem_watch;
+static MemoryRegion io_mem_watch;
#endif
/* log support */
}
#if !defined(CONFIG_USER_ONLY)
-static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
+
+static void phys_map_node_reserve(unsigned nodes)
{
- PhysPageDesc *pd;
- void **lp;
- int i;
+ if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
+ typedef PhysPageEntry Node[L2_SIZE];
+ phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
+ phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
+ phys_map_nodes_nb + nodes);
+ phys_map_nodes = g_renew(Node, phys_map_nodes,
+ phys_map_nodes_nb_alloc);
+ }
+}
- /* Level 1. Always allocated. */
- lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
+static uint16_t phys_map_node_alloc(void)
+{
+ unsigned i;
+ uint16_t ret;
- /* Level 2..N-1. */
- for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
- void **p = *lp;
- if (p == NULL) {
- if (!alloc) {
- return NULL;
- }
- *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
- }
- lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
+ ret = phys_map_nodes_nb++;
+ assert(ret != PHYS_MAP_NODE_NIL);
+ assert(ret != phys_map_nodes_nb_alloc);
+ for (i = 0; i < L2_SIZE; ++i) {
+ phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
}
+ return ret;
+}
- pd = *lp;
- if (pd == NULL) {
- int i;
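+/* Nodes are never freed one at a time; the whole map is torn down and
+   rebuilt on every memory topology change (see core_begin() below). */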
+static void phys_map_nodes_reset(void)
+{
+ phys_map_nodes_nb = 0;
+}
- if (!alloc) {
- return NULL;
- }
- *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
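+/* Walk the radix tree from the top level down, allocating nodes as
+   needed, and set the leaf for the page at 'index'. */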
+static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t index,
+ uint16_t leaf, int level)
+{
+ PhysPageEntry *p;
+ int i;
- for (i = 0; i < L2_SIZE; i++) {
- pd[i].phys_offset = IO_MEM_UNASSIGNED;
- pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
+ if (lp->u.node == PHYS_MAP_NODE_NIL) {
+ lp->u.node = phys_map_node_alloc();
+ p = phys_map_nodes[lp->u.node];
+ if (level == 0) {
+ for (i = 0; i < L2_SIZE; i++) {
+ p[i].u.leaf = phys_section_unassigned;
+ }
}
+ } else {
+ p = phys_map_nodes[lp->u.node];
}
+ lp = &p[(index >> (level * L2_BITS)) & (L2_SIZE - 1)];
- return pd + (index & (L2_SIZE - 1));
+ if (level == 0) {
+ lp->u.leaf = leaf;
+ } else {
+ phys_page_set_level(lp, index, leaf, level - 1);
+ }
}
-static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
+static void phys_page_set(target_phys_addr_t index, uint16_t leaf)
{
- return phys_page_find_alloc(index, 0);
+ phys_map_node_reserve(P_L2_LEVELS);
+
+ phys_page_set_level(&phys_map, index, leaf, P_L2_LEVELS - 1);
+}
+
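+/* Look up the section covering a page.  The returned copy is re-based
+   so that its offsets start at the page being looked up; unmapped
+   addresses resolve to the phys_section_unassigned entry. */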
+static MemoryRegionSection phys_page_find(target_phys_addr_t index)
+{
+ PhysPageEntry lp = phys_map;
+ PhysPageEntry *p;
+ int i;
+ MemoryRegionSection section;
+ target_phys_addr_t delta;
+ uint16_t s_index = phys_section_unassigned;
+
+ for (i = P_L2_LEVELS - 1; i >= 0; i--) {
+ if (lp.u.node == PHYS_MAP_NODE_NIL) {
+ goto not_found;
+ }
+ p = phys_map_nodes[lp.u.node];
+ lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
+ }
+
+ s_index = lp.u.leaf;
+not_found:
+ section = phys_sections[s_index];
+ index <<= TARGET_PAGE_BITS;
+ assert(section.offset_within_address_space <= index
+           && index <= section.offset_within_address_space + section.size - 1);
+ delta = index - section.offset_within_address_space;
+ section.offset_within_address_space += delta;
+ section.offset_within_region += delta;
+ section.size -= delta;
+ return section;
}
static void tlb_protect_code(ram_addr_t ram_addr);
code_gen_buffer_size = tb_size;
if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
- /* in user mode, phys_ram_size is not meaningful */
code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
/* XXX: needs adjustments */
if (code_gen_buffer_size > (512 * 1024 * 1024))
code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
- /* Map the buffer below 32M, so we can use direct calls and branches */
- flags |= MAP_FIXED;
- start = (void *) 0x01000000UL;
+ /* Keep the buffer no bigger than 16MB to branch between blocks */
if (code_gen_buffer_size > 16 * 1024 * 1024)
code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
target_phys_addr_t addr;
- target_ulong pd;
ram_addr_t ram_addr;
- PhysPageDesc *p;
+ MemoryRegionSection section;
addr = cpu_get_phys_page_debug(env, pc);
- p = phys_page_find(addr >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
+ section = phys_page_find(addr >> TARGET_PAGE_BITS);
+ if (!(memory_region_is_ram(section.mr)
+ || (section.mr->rom_device && section.mr->readable))) {
+ return;
}
- ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
+ ram_addr = (memory_region_get_ram_addr(section.mr)
+ + section.offset_within_region) & TARGET_PAGE_MASK;
+ ram_addr |= (pc & ~TARGET_PAGE_MASK);
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
static char logfile_buf[4096];
setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
}
-#elif !defined(_WIN32)
- /* Win32 doesn't support line-buffering and requires size >= 2 */
+#elif defined(_WIN32)
+ /* Win32 doesn't support line-buffering, so use unbuffered output. */
+ setvbuf(logfile, NULL, _IONBF, 0);
+#else
setvbuf(logfile, NULL, _IOLBF, 0);
#endif
log_append = 1;
{ 0, NULL, NULL },
};
-#ifndef CONFIG_USER_ONLY
-static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
- = QLIST_HEAD_INITIALIZER(memory_client_list);
-
-static void cpu_notify_set_memory(target_phys_addr_t start_addr,
- ram_addr_t size,
- ram_addr_t phys_offset,
- bool log_dirty)
-{
- CPUPhysMemoryClient *client;
- QLIST_FOREACH(client, &memory_client_list, list) {
- client->set_memory(client, start_addr, size, phys_offset, log_dirty);
- }
-}
-
-static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
- target_phys_addr_t end)
-{
- CPUPhysMemoryClient *client;
- QLIST_FOREACH(client, &memory_client_list, list) {
- int r = client->sync_dirty_bitmap(client, start, end);
- if (r < 0)
- return r;
- }
- return 0;
-}
-
-static int cpu_notify_migration_log(int enable)
-{
- CPUPhysMemoryClient *client;
- QLIST_FOREACH(client, &memory_client_list, list) {
- int r = client->migration_log(client, enable);
- if (r < 0)
- return r;
- }
- return 0;
-}
-
-struct last_map {
- target_phys_addr_t start_addr;
- ram_addr_t size;
- ram_addr_t phys_offset;
-};
-
-/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
- * address. Each intermediate table provides the next L2_BITs of guest
- * physical address space. The number of levels vary based on host and
- * guest configuration, making it efficient to build the final guest
- * physical address by seeding the L1 offset and shifting and adding in
- * each L2 offset as we recurse through them. */
-static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
- void **lp, target_phys_addr_t addr,
- struct last_map *map)
-{
- int i;
-
- if (*lp == NULL) {
- return;
- }
- if (level == 0) {
- PhysPageDesc *pd = *lp;
- addr <<= L2_BITS + TARGET_PAGE_BITS;
- for (i = 0; i < L2_SIZE; ++i) {
- if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
- target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
-
- if (map->size &&
- start_addr == map->start_addr + map->size &&
- pd[i].phys_offset == map->phys_offset + map->size) {
-
- map->size += TARGET_PAGE_SIZE;
- continue;
- } else if (map->size) {
- client->set_memory(client, map->start_addr,
- map->size, map->phys_offset, false);
- }
-
- map->start_addr = start_addr;
- map->size = TARGET_PAGE_SIZE;
- map->phys_offset = pd[i].phys_offset;
- }
- }
- } else {
- void **pp = *lp;
- for (i = 0; i < L2_SIZE; ++i) {
- phys_page_for_each_1(client, level - 1, pp + i,
- (addr << L2_BITS) | i, map);
- }
- }
-}
-
-static void phys_page_for_each(CPUPhysMemoryClient *client)
-{
- int i;
- struct last_map map = { };
-
- for (i = 0; i < P_L1_SIZE; ++i) {
- phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
- l1_phys_map + i, i, &map);
- }
- if (map.size) {
- client->set_memory(client, map.start_addr, map.size, map.phys_offset,
- false);
- }
-}
-
-void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
-{
- QLIST_INSERT_HEAD(&memory_client_list, client, list);
- phys_page_for_each(client);
-}
-
-void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
-{
- QLIST_REMOVE(client, list);
-}
-#endif
-
static int cmp1(const char *s1, int n, const char *s2)
{
if (strlen(s2) != n)
.addend = -1,
};
-/* NOTE: if flush_global is true, also flush global entries (not
- implemented yet) */
+/* NOTE:
+ * If flush_global is true (the usual case), flush all tlb entries.
+ * If flush_global is false, flush (at least) all tlb entries not
+ * marked global.
+ *
+ * Since QEMU doesn't currently implement a global/not-global flag
+ * for tlb entries, at the moment tlb_flush() will also flush all
+ * tlb entries in the flush_global == false case. This is OK because
+ * CPU architectures generally permit an implementation to drop
+ * entries from the TLB at any time, so flushing more entries than
+ * required is only an efficiency issue, not a correctness issue.
+ */
void tlb_flush(CPUState *env, int flush_global)
{
int i;
unsigned long start, unsigned long length)
{
unsigned long addr;
- if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+ if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
if ((addr - start) < length) {
tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
{
-    int ret = 0;
     in_migration = enable;
-    ret = cpu_notify_migration_log(!!enable);
-    return ret;
+    return 0;
}
-int cpu_physical_memory_get_dirty_tracking(void)
-{
- return in_migration;
-}
-
-int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
- target_phys_addr_t end_addr)
-{
- int ret;
-
- ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
- return ret;
-}
-
-int cpu_physical_log_start(target_phys_addr_t start_addr,
- ram_addr_t size)
-{
- CPUPhysMemoryClient *client;
- QLIST_FOREACH(client, &memory_client_list, list) {
- if (client->log_start) {
- int r = client->log_start(client, start_addr, size);
- if (r < 0) {
- return r;
- }
- }
- }
- return 0;
-}
-
-int cpu_physical_log_stop(target_phys_addr_t start_addr,
- ram_addr_t size)
-{
- CPUPhysMemoryClient *client;
- QLIST_FOREACH(client, &memory_client_list, list) {
- if (client->log_stop) {
- int r = client->log_stop(client, start_addr, size);
- if (r < 0) {
- return r;
- }
- }
- }
- return 0;
-}
-
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
ram_addr_t ram_addr;
void *p;
- if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
+ if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
+ tlb_entry->addend);
ram_addr = qemu_ram_addr_from_host_nofail(p);
env->tlb_flush_mask = mask;
}
+static bool is_ram_rom(MemoryRegionSection *s)
+{
+ return memory_region_is_ram(s->mr);
+}
+
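+/* A "ROMD" region is a ROM device currently in its direct-read mode:
+   reads go straight to RAM while writes still trap to I/O callbacks. */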
+static bool is_romd(MemoryRegionSection *s)
+{
+ MemoryRegion *mr = s->mr;
+
+ return mr->rom_device && mr->readable;
+}
+
+static bool is_ram_rom_romd(MemoryRegionSection *s)
+{
+ return is_ram_rom(s) || is_romd(s);
+}
+
/* Add a new TLB entry. At most one entry for a given virtual address
is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
supplied size is only used by tlb_flush_page. */
target_phys_addr_t paddr, int prot,
int mmu_idx, target_ulong size)
{
- PhysPageDesc *p;
- unsigned long pd;
+ MemoryRegionSection section;
unsigned int index;
target_ulong address;
target_ulong code_address;
if (size != TARGET_PAGE_SIZE) {
tlb_add_large_page(env, vaddr, size);
}
- p = phys_page_find(paddr >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(paddr >> TARGET_PAGE_BITS);
#if defined(DEBUG_TLB)
printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
" prot=%x idx=%d pd=0x%08lx\n",
#endif
address = vaddr;
- if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
+    if (!is_ram_rom_romd(&section)) {
/* IO memory case (romd handled later) */
address |= TLB_MMIO;
}
- addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
- if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
+    if (is_ram_rom_romd(&section)) {
+ addend = (unsigned long)(memory_region_get_ram_ptr(section.mr)
+ + section.offset_within_region);
+ } else {
+ addend = 0;
+ }
+    if (is_ram_rom(&section)) {
/* Normal RAM. */
- iotlb = pd & TARGET_PAGE_MASK;
- if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
- iotlb |= IO_MEM_NOTDIRTY;
+ iotlb = (memory_region_get_ram_addr(section.mr)
+ + section.offset_within_region) & TARGET_PAGE_MASK;
+        if (!section.readonly) {
+            iotlb |= io_mem_notdirty.ram_addr;
-        else
-            iotlb |= IO_MEM_ROM;
+        } else {
+            iotlb |= io_mem_rom.ram_addr;
+        }
} else {
/* IO handlers are currently passed a physical address.
It would be nice to pass an offset from the base address
-           and avoid full address decoding in every device.
-           We can't use the high bits of pd for this because
-           IO_MEM_ROMD uses these as a ram address. */
+           and avoid full address decoding in every device. */
- iotlb = (pd & ~TARGET_PAGE_MASK);
- if (p) {
- iotlb += p->region_offset;
- } else {
- iotlb += paddr;
- }
+ iotlb = memory_region_get_ram_addr(section.mr) & ~TARGET_PAGE_MASK;
+ iotlb += section.offset_within_region;
}
code_address = address;
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
/* Avoid trapping reads of pages with a write breakpoint. */
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
- iotlb = io_mem_watch + paddr;
+ iotlb = io_mem_watch.ram_addr + paddr;
address |= TLB_MMIO;
break;
}
te->addr_code = -1;
}
if (prot & PAGE_WRITE) {
- if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
- (pd & IO_MEM_ROMD)) {
+ if ((memory_region_is_ram(section.mr) && section.readonly)
+            || is_romd(&section)) {
/* Write access calls the I/O callback. */
te->addr_write = address | TLB_MMIO;
- } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
- !cpu_physical_memory_is_dirty(pd)) {
+ } else if (memory_region_is_ram(section.mr)
+ && !cpu_physical_memory_is_dirty(
+ section.mr->ram_addr
+ + section.offset_within_region)) {
te->addr_write = address | TLB_NOTDIRTY;
} else {
te->addr_write = address;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
+ MemoryRegion iomem;
target_phys_addr_t base;
- ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
- ram_addr_t region_offset[TARGET_PAGE_SIZE];
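+    /* one phys_sections index per byte of the page; 16 bits per entry
+       instead of the two ram_addr_t tables this replaces */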
+ uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
- ram_addr_t memory, ram_addr_t region_offset);
-static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
- ram_addr_t orig_memory,
- ram_addr_t region_offset);
-#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
- need_subpage) \
- do { \
- if (addr > start_addr) \
- start_addr2 = 0; \
- else { \
- start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
- if (start_addr2 > 0) \
- need_subpage = 1; \
- } \
- \
- if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
- end_addr2 = TARGET_PAGE_SIZE - 1; \
- else { \
- end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
- if (end_addr2 < TARGET_PAGE_SIZE - 1) \
- need_subpage = 1; \
- } \
- } while (0)
+ uint16_t section);
+static subpage_t *subpage_init(target_phys_addr_t base);
+static void destroy_page_desc(uint16_t section_index)
+{
+ MemoryRegionSection *section = &phys_sections[section_index];
+ MemoryRegion *mr = section->mr;
+
+ if (mr->subpage) {
+ subpage_t *subpage = container_of(mr, subpage_t, iomem);
+ memory_region_destroy(&subpage->iomem);
+ g_free(subpage);
+ }
+}
+
+static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
+{
+ unsigned i;
+ PhysPageEntry *p;
+
+ if (lp->u.node == PHYS_MAP_NODE_NIL) {
+ return;
+ }
+
+ p = phys_map_nodes[lp->u.node];
+ for (i = 0; i < L2_SIZE; ++i) {
+ if (level > 0) {
+ destroy_l2_mapping(&p[i], level - 1);
+ } else {
+ destroy_page_desc(p[i].u.leaf);
+ }
+ }
+ lp->u.node = PHYS_MAP_NODE_NIL;
+}
+
+static void destroy_all_mappings(void)
+{
+ destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
+ phys_map_nodes_reset();
+}
+
+static uint16_t phys_section_add(MemoryRegionSection *section)
+{
+ if (phys_sections_nb == phys_sections_nb_alloc) {
+ phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
+ phys_sections = g_renew(MemoryRegionSection, phys_sections,
+ phys_sections_nb_alloc);
+ }
+ phys_sections[phys_sections_nb] = *section;
+ return phys_sections_nb++;
+}
+
+static void phys_sections_clear(void)
+{
+ phys_sections_nb = 0;
+}
-/* register physical memory.
-   For RAM, 'size' must be a multiple of the target page size.
-   start_addr and region_offset are rounded down to a page boundary
-   before calculating this offset. This should not be a problem unless
-   the low bits of start_addr and region_offset differ. */
+/* Register a section that only partially covers its page: the section
+   is entered into the subpage for that page, creating the subpage
+   first if the page was previously unassigned. */
-void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
- ram_addr_t size,
- ram_addr_t phys_offset,
- ram_addr_t region_offset,
- bool log_dirty)
+static void register_subpage(MemoryRegionSection *section)
{
- target_phys_addr_t addr, end_addr;
- PhysPageDesc *p;
- CPUState *env;
- ram_addr_t orig_size = size;
subpage_t *subpage;
+ target_phys_addr_t base = section->offset_within_address_space
+ & TARGET_PAGE_MASK;
+ MemoryRegionSection existing = phys_page_find(base >> TARGET_PAGE_BITS);
+ MemoryRegionSection subsection = {
+ .offset_within_address_space = base,
+ .size = TARGET_PAGE_SIZE,
+ };
+ target_phys_addr_t start, end;
+
+ assert(existing.mr->subpage || existing.mr == &io_mem_unassigned);
+
+ if (!(existing.mr->subpage)) {
+ subpage = subpage_init(base);
+ subsection.mr = &subpage->iomem;
+ phys_page_set(base >> TARGET_PAGE_BITS, phys_section_add(&subsection));
+ } else {
+ subpage = container_of(existing.mr, subpage_t, iomem);
+ }
+ start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
+    /* subpage_register() takes an inclusive [start, end] range */
+    end = start + section->size - 1;
+ subpage_register(subpage, start, end, phys_section_add(section));
+}
+
+static void register_multipage(MemoryRegionSection *section)
+{
+ target_phys_addr_t start_addr = section->offset_within_address_space;
+ ram_addr_t size = section->size;
+ target_phys_addr_t addr, end_addr;
+ uint16_t section_index = phys_section_add(section);
assert(size);
- cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
- if (phys_offset == IO_MEM_UNASSIGNED) {
- region_offset = start_addr;
- }
- region_offset &= TARGET_PAGE_MASK;
- size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
end_addr = start_addr + (target_phys_addr_t)size;
addr = start_addr;
do {
- p = phys_page_find(addr >> TARGET_PAGE_BITS);
- if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
- ram_addr_t orig_memory = p->phys_offset;
- target_phys_addr_t start_addr2, end_addr2;
- int need_subpage = 0;
-
- CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
- need_subpage);
- if (need_subpage) {
- if (!(orig_memory & IO_MEM_SUBPAGE)) {
- subpage = subpage_init((addr & TARGET_PAGE_MASK),
- &p->phys_offset, orig_memory,
- p->region_offset);
- } else {
- subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
- >> IO_MEM_SHIFT];
- }
- subpage_register(subpage, start_addr2, end_addr2, phys_offset,
- region_offset);
- p->region_offset = 0;
- } else {
- p->phys_offset = phys_offset;
- if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
- (phys_offset & IO_MEM_ROMD))
- phys_offset += TARGET_PAGE_SIZE;
- }
- } else {
- p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
- p->phys_offset = phys_offset;
- p->region_offset = region_offset;
- if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
- (phys_offset & IO_MEM_ROMD)) {
- phys_offset += TARGET_PAGE_SIZE;
- } else {
- target_phys_addr_t start_addr2, end_addr2;
- int need_subpage = 0;
-
- CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
- end_addr2, need_subpage);
-
- if (need_subpage) {
- subpage = subpage_init((addr & TARGET_PAGE_MASK),
- &p->phys_offset, IO_MEM_UNASSIGNED,
- addr & TARGET_PAGE_MASK);
- subpage_register(subpage, start_addr2, end_addr2,
- phys_offset, region_offset);
- p->region_offset = 0;
- }
- }
- }
- region_offset += TARGET_PAGE_SIZE;
+ phys_page_set(addr >> TARGET_PAGE_BITS, section_index);
addr += TARGET_PAGE_SIZE;
} while (addr != end_addr);
-
- /* since each CPU stores ram addresses in its TLB cache, we must
- reset the modified entries */
- /* XXX: slow ! */
- for(env = first_cpu; env != NULL; env = env->next_cpu) {
- tlb_flush(env, 1);
- }
}
-/* XXX: temporary until new memory mapping API */
-ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
+void cpu_register_physical_memory_log(MemoryRegionSection *section,
+ bool readonly)
{
- PhysPageDesc *p;
+ MemoryRegionSection now = *section, remain = *section;
- p = phys_page_find(addr >> TARGET_PAGE_BITS);
- if (!p)
- return IO_MEM_UNASSIGNED;
- return p->phys_offset;
+ if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
+ || (now.size < TARGET_PAGE_SIZE)) {
+ now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
+ - now.offset_within_address_space,
+ now.size);
+ register_subpage(&now);
+ remain.size -= now.size;
+ remain.offset_within_address_space += now.size;
+ remain.offset_within_region += now.size;
+ }
+ now = remain;
+ now.size &= TARGET_PAGE_MASK;
+ if (now.size) {
+ register_multipage(&now);
+ remain.size -= now.size;
+ remain.offset_within_address_space += now.size;
+ remain.offset_within_region += now.size;
+ }
+ now = remain;
+ if (now.size) {
+ register_subpage(&now);
+ }
}
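+
+/* Illustrative example: with 4K pages, registering a section covering
+   [0x0800, 0x3400) splits it into a head subpage [0x0800, 0x1000), a
+   multipage run [0x1000, 0x3000) and a tail subpage [0x3000, 0x3400),
+   so only partially covered pages pay for subpage dispatch. */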
+
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
if (kvm_enabled())
static ram_addr_t find_ram_offset(ram_addr_t size)
{
RAMBlock *block, *next_block;
- ram_addr_t offset = 0, mingap = RAM_ADDR_MAX;
+ ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
if (QLIST_EMPTY(&ram_list.blocks))
return 0;
}
}
if (next - end >= size && next - end < mingap) {
-            offset =  end;
+ offset = end;
mingap = next - end;
}
}
+
+ if (offset == RAM_ADDR_MAX) {
+ fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
+ (uint64_t)size);
+ abort();
+ }
+
return offset;
}
return last;
}
-ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
- ram_addr_t size, void *host)
+void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
RAMBlock *new_block, *block;
- size = TARGET_PAGE_ALIGN(size);
- new_block = g_malloc0(sizeof(*new_block));
+ new_block = NULL;
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (block->offset == addr) {
+ new_block = block;
+ break;
+ }
+ }
+ assert(new_block);
+ assert(!new_block->idstr[0]);
if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
char *id = dev->parent_bus->info->get_dev_path(dev);
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
QLIST_FOREACH(block, &ram_list.blocks, next) {
- if (!strcmp(block->idstr, new_block->idstr)) {
+ if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
new_block->idstr);
abort();
}
}
+}
+
+ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+ MemoryRegion *mr)
+{
+ RAMBlock *new_block;
+ size = TARGET_PAGE_ALIGN(size);
+ new_block = g_malloc0(sizeof(*new_block));
+
+ new_block->mr = mr;
new_block->offset = find_ram_offset(size);
if (host) {
new_block->host = host;
}
#else
if (xen_enabled()) {
- xen_ram_alloc(new_block->offset, size);
+ xen_ram_alloc(new_block->offset, size, mr);
} else {
new_block->host = qemu_vmalloc(size);
}
return new_block->offset;
}
-ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
+ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
- return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
+ return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
return ram_addr;
}
-static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
-{
-#ifdef DEBUG_UNASSIGNED
- printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
-#endif
-#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
-#endif
- return 0;
-}
-
-static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
+static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
+ unsigned size)
{
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
+ cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
return 0;
}
-static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
+static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
+ uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
- printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
+ printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
+ cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
- return 0;
}
-static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
-{
-#ifdef DEBUG_UNASSIGNED
- printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
-#endif
-#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
-#endif
-}
+static const MemoryRegionOps unassigned_mem_ops = {
+ .read = unassigned_mem_read,
+ .write = unassigned_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
-static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
+static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
+ unsigned size)
{
-#ifdef DEBUG_UNASSIGNED
- printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
-#endif
-#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
-#endif
+ abort();
}
-static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
+static void error_mem_write(void *opaque, target_phys_addr_t addr,
+ uint64_t value, unsigned size)
{
-#ifdef DEBUG_UNASSIGNED
- printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
-#endif
-#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
- cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
-#endif
+ abort();
}
-static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
- unassigned_mem_readb,
- unassigned_mem_readw,
- unassigned_mem_readl,
+static const MemoryRegionOps error_mem_ops = {
+ .read = error_mem_read,
+ .write = error_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
};
-static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
- unassigned_mem_writeb,
- unassigned_mem_writew,
- unassigned_mem_writel,
+static const MemoryRegionOps rom_mem_ops = {
+ .read = error_mem_read,
+ .write = unassigned_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
};
-static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
- uint32_t val)
+static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
+ uint64_t val, unsigned size)
{
int dirty_flags;
dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
- tb_invalidate_phys_page_fast(ram_addr, 1);
+ tb_invalidate_phys_page_fast(ram_addr, size);
dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
}
- stb_p(qemu_get_ram_ptr(ram_addr), val);
- dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
- /* we remove the notdirty callback only if the code has been
- flushed */
- if (dirty_flags == 0xff)
- tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
-}
-
-static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
- uint32_t val)
-{
- int dirty_flags;
- dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
- if (!(dirty_flags & CODE_DIRTY_FLAG)) {
-#if !defined(CONFIG_USER_ONLY)
- tb_invalidate_phys_page_fast(ram_addr, 2);
- dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
-#endif
- }
- stw_p(qemu_get_ram_ptr(ram_addr), val);
- dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
- /* we remove the notdirty callback only if the code has been
- flushed */
- if (dirty_flags == 0xff)
- tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
-}
-
-static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
- uint32_t val)
-{
- int dirty_flags;
- dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
- if (!(dirty_flags & CODE_DIRTY_FLAG)) {
-#if !defined(CONFIG_USER_ONLY)
- tb_invalidate_phys_page_fast(ram_addr, 4);
- dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
-#endif
+ switch (size) {
+ case 1:
+ stb_p(qemu_get_ram_ptr(ram_addr), val);
+ break;
+ case 2:
+ stw_p(qemu_get_ram_ptr(ram_addr), val);
+ break;
+ case 4:
+ stl_p(qemu_get_ram_ptr(ram_addr), val);
+ break;
+ default:
+ abort();
}
- stl_p(qemu_get_ram_ptr(ram_addr), val);
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
-static CPUReadMemoryFunc * const error_mem_read[3] = {
- NULL, /* never used */
- NULL, /* never used */
- NULL, /* never used */
-};
-
-static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
- notdirty_mem_writeb,
- notdirty_mem_writew,
- notdirty_mem_writel,
+static const MemoryRegionOps notdirty_mem_ops = {
+ .read = error_mem_read,
+ .write = notdirty_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit. */
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
so these check for a hit then pass through to the normal out-of-line
phys routines. */
-static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
-{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
- return ldub_phys(addr);
-}
-
-static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
-{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
- return lduw_phys(addr);
-}
-
-static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
+static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
+ unsigned size)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
- return ldl_phys(addr);
-}
-
-static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
- uint32_t val)
-{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
- stb_phys(addr, val);
-}
-
-static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
- uint32_t val)
-{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
- stw_phys(addr, val);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
+ switch (size) {
+ case 1: return ldub_phys(addr);
+ case 2: return lduw_phys(addr);
+ case 4: return ldl_phys(addr);
+ default: abort();
+ }
}
-static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
- uint32_t val)
+static void watch_mem_write(void *opaque, target_phys_addr_t addr,
+ uint64_t val, unsigned size)
{
- check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
- stl_phys(addr, val);
+ check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
+ switch (size) {
+    case 1: stb_phys(addr, val); break;
+    case 2: stw_phys(addr, val); break;
+    case 4: stl_phys(addr, val); break;
+ default: abort();
+ }
}
-static CPUReadMemoryFunc * const watch_mem_read[3] = {
- watch_mem_readb,
- watch_mem_readw,
- watch_mem_readl,
+static const MemoryRegionOps watch_mem_ops = {
+ .read = watch_mem_read,
+ .write = watch_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
};
-static CPUWriteMemoryFunc * const watch_mem_write[3] = {
- watch_mem_writeb,
- watch_mem_writew,
- watch_mem_writel,
-};
-
-static inline uint32_t subpage_readlen (subpage_t *mmio,
- target_phys_addr_t addr,
- unsigned int len)
+static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
+ unsigned len)
{
+ subpage_t *mmio = opaque;
unsigned int idx = SUBPAGE_IDX(addr);
+ MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
mmio, len, addr, idx);
#endif
- addr += mmio->region_offset[idx];
- idx = mmio->sub_io_index[idx];
- return io_mem_read[idx][len](io_mem_opaque[idx], addr);
+ section = &phys_sections[mmio->sub_section[idx]];
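+    /* rebuild the full physical address, then re-express it as an
+       offset into the target section's MemoryRegion */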
+ addr += mmio->base;
+ addr -= section->offset_within_address_space;
+ addr += section->offset_within_region;
+ return io_mem_read(section->mr->ram_addr, addr, len);
}
-static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
- uint32_t value, unsigned int len)
+static void subpage_write(void *opaque, target_phys_addr_t addr,
+ uint64_t value, unsigned len)
{
+ subpage_t *mmio = opaque;
unsigned int idx = SUBPAGE_IDX(addr);
+ MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
- printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
+ printf("%s: subpage %p len %d addr " TARGET_FMT_plx
+ " idx %d value %"PRIx64"\n",
__func__, mmio, len, addr, idx, value);
#endif
- addr += mmio->region_offset[idx];
- idx = mmio->sub_io_index[idx];
- io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
-}
-
-static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
-{
- return subpage_readlen(opaque, addr, 0);
-}
-
-static void subpage_writeb (void *opaque, target_phys_addr_t addr,
- uint32_t value)
-{
- subpage_writelen(opaque, addr, value, 0);
-}
-
-static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
-{
- return subpage_readlen(opaque, addr, 1);
+ section = &phys_sections[mmio->sub_section[idx]];
+ addr += mmio->base;
+ addr -= section->offset_within_address_space;
+ addr += section->offset_within_region;
+ io_mem_write(section->mr->ram_addr, addr, value, len);
}
-static void subpage_writew (void *opaque, target_phys_addr_t addr,
- uint32_t value)
-{
- subpage_writelen(opaque, addr, value, 1);
-}
+static const MemoryRegionOps subpage_ops = {
+ .read = subpage_read,
+ .write = subpage_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
-static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
+static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
+ unsigned size)
{
- return subpage_readlen(opaque, addr, 2);
+ ram_addr_t raddr = addr;
+ void *ptr = qemu_get_ram_ptr(raddr);
+ switch (size) {
+ case 1: return ldub_p(ptr);
+ case 2: return lduw_p(ptr);
+ case 4: return ldl_p(ptr);
+ default: abort();
+ }
}
-static void subpage_writel (void *opaque, target_phys_addr_t addr,
- uint32_t value)
+static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
+ uint64_t value, unsigned size)
{
- subpage_writelen(opaque, addr, value, 2);
+ ram_addr_t raddr = addr;
+ void *ptr = qemu_get_ram_ptr(raddr);
+ switch (size) {
+    case 1: stb_p(ptr, value); break;
+    case 2: stw_p(ptr, value); break;
+    case 4: stl_p(ptr, value); break;
+ default: abort();
+ }
}
-static CPUReadMemoryFunc * const subpage_read[] = {
- &subpage_readb,
- &subpage_readw,
- &subpage_readl,
-};
-
-static CPUWriteMemoryFunc * const subpage_write[] = {
- &subpage_writeb,
- &subpage_writew,
- &subpage_writel,
+static const MemoryRegionOps subpage_ram_ops = {
+ .read = subpage_ram_read,
+ .write = subpage_ram_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
- ram_addr_t memory, ram_addr_t region_offset)
+ uint16_t section)
{
int idx, eidx;
printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
mmio, start, end, idx, eidx, memory);
#endif
- if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
- memory = IO_MEM_UNASSIGNED;
- memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
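+    /* RAM inside a subpage cannot be mapped directly (the TLB maps
+       whole pages), so route it through the subpage-ram accessors,
+       which touch the backing RAM one access at a time. */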
+ if (memory_region_is_ram(phys_sections[section].mr)) {
+ MemoryRegionSection new_section = phys_sections[section];
+ new_section.mr = &io_mem_subpage_ram;
+ section = phys_section_add(&new_section);
+ }
for (; idx <= eidx; idx++) {
- mmio->sub_io_index[idx] = memory;
- mmio->region_offset[idx] = region_offset;
+ mmio->sub_section[idx] = section;
}
return 0;
}
-static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
- ram_addr_t orig_memory,
- ram_addr_t region_offset)
+static subpage_t *subpage_init(target_phys_addr_t base)
{
subpage_t *mmio;
- int subpage_memory;
mmio = g_malloc0(sizeof(subpage_t));
mmio->base = base;
- subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
- DEVICE_NATIVE_ENDIAN);
+ memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
+ "subpage", TARGET_PAGE_SIZE);
+ mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
- *phys = subpage_memory | IO_MEM_SUBPAGE;
- subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
+ subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
return mmio;
}
return -1;
}
-/*
- * Usually, devices operate in little endian mode. There are devices out
- * there that operate in big endian too. Each device gets byte swapped
- * mmio if plugged onto a CPU that does the other endianness.
- *
- * CPU Device swap?
- *
- * little little no
- * little big yes
- * big little yes
- * big big no
- */
+/* Register a MemoryRegion in the io_mem dispatch table.  If io_index
+   is non zero, the corresponding io table entry is modified.  If it is
+   zero, a new entry is allocated.  The return value is the table
+   index; (-1) is returned on error. */
+static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
+{
+ if (io_index <= 0) {
+ io_index = get_free_io_mem_idx();
+ if (io_index == -1)
+ return io_index;
+ } else {
+ if (io_index >= IO_MEM_NB_ENTRIES)
+ return -1;
+ }
-typedef struct SwapEndianContainer {
- CPUReadMemoryFunc *read[3];
- CPUWriteMemoryFunc *write[3];
- void *opaque;
-} SwapEndianContainer;
+ io_mem_region[io_index] = mr;
+
+ return io_index;
+}
-static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
+int cpu_register_io_memory(MemoryRegion *mr)
{
- uint32_t val;
- SwapEndianContainer *c = opaque;
- val = c->read[0](c->opaque, addr);
- return val;
+ return cpu_register_io_memory_fixed(0, mr);
}
-static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
+void cpu_unregister_io_memory(int io_index)
{
- uint32_t val;
- SwapEndianContainer *c = opaque;
- val = bswap16(c->read[1](c->opaque, addr));
- return val;
+ io_mem_region[io_index] = NULL;
+ io_mem_used[io_index] = 0;
}
-static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
+static uint16_t dummy_section(MemoryRegion *mr)
{
- uint32_t val;
- SwapEndianContainer *c = opaque;
- val = bswap32(c->read[2](c->opaque, addr));
- return val;
+ MemoryRegionSection section = {
+ .mr = mr,
+ .offset_within_address_space = 0,
+ .offset_within_region = 0,
+ .size = UINT64_MAX,
+ };
+
+    return phys_section_add(&section);
}
-static CPUReadMemoryFunc * const swapendian_readfn[3]={
- swapendian_mem_readb,
- swapendian_mem_readw,
- swapendian_mem_readl
-};
+static void io_mem_init(void)
+{
+ int i;
-static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
- uint32_t val)
+ /* Must be first: */
+ memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
+ assert(io_mem_ram.ram_addr == 0);
+ memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
+ memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
+ "unassigned", UINT64_MAX);
+ memory_region_init_io(&io_mem_notdirty, ¬dirty_mem_ops, NULL,
+ "notdirty", UINT64_MAX);
+ memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
+ "subpage-ram", UINT64_MAX);
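+    /* mark the five fixed io_mem table slots created above as in use */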
+    for (i = 0; i < 5; i++) {
+        io_mem_used[i] = 1;
+    }
+
+ memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
+ "watch", UINT64_MAX);
+}
+
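+/* The core listener rebuilds the physical page map on each memory
+   transaction: begin() drops all mappings and sections, region_add()
+   and region_nop() repopulate them, and commit() flushes every CPU's
+   TLB, since TLB entries cache ram_addr translations. */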
+static void core_begin(MemoryListener *listener)
{
- SwapEndianContainer *c = opaque;
- c->write[0](c->opaque, addr, val);
+ destroy_all_mappings();
+ phys_sections_clear();
+ phys_map.u.node = PHYS_MAP_NODE_NIL;
+ phys_section_unassigned = dummy_section(&io_mem_unassigned);
}
-static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
- uint32_t val)
+static void core_commit(MemoryListener *listener)
{
- SwapEndianContainer *c = opaque;
- c->write[1](c->opaque, addr, bswap16(val));
+ CPUState *env;
+
+ /* since each CPU stores ram addresses in its TLB cache, we must
+ reset the modified entries */
+ /* XXX: slow ! */
+ for(env = first_cpu; env != NULL; env = env->next_cpu) {
+ tlb_flush(env, 1);
+ }
}
-static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
- uint32_t val)
+static void core_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
{
- SwapEndianContainer *c = opaque;
- c->write[2](c->opaque, addr, bswap32(val));
+ cpu_register_physical_memory_log(section, section->readonly);
}
-static CPUWriteMemoryFunc * const swapendian_writefn[3]={
- swapendian_mem_writeb,
- swapendian_mem_writew,
- swapendian_mem_writel
-};
+static void core_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+}
-static void swapendian_init(int io_index)
+static void core_region_nop(MemoryListener *listener,
+ MemoryRegionSection *section)
{
- SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
- int i;
+ cpu_register_physical_memory_log(section, section->readonly);
+}
+
+static void core_log_start(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+}
- /* Swap mmio for big endian targets */
- c->opaque = io_mem_opaque[io_index];
- for (i = 0; i < 3; i++) {
- c->read[i] = io_mem_read[io_index][i];
- c->write[i] = io_mem_write[io_index][i];
+static void core_log_stop(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+}
- io_mem_read[io_index][i] = swapendian_readfn[i];
- io_mem_write[io_index][i] = swapendian_writefn[i];
- }
- io_mem_opaque[io_index] = c;
+static void core_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
}
-static void swapendian_del(int io_index)
+static void core_log_global_start(MemoryListener *listener)
{
- if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
- g_free(io_mem_opaque[io_index]);
- }
+ cpu_physical_memory_set_dirty_tracking(1);
}
-/* mem_read and mem_write are arrays of functions containing the
- function to access byte (index 0), word (index 1) and dword (index
- 2). Functions can be omitted with a NULL function pointer.
- If io_index is non zero, the corresponding io zone is
- modified. If it is zero, a new io zone is allocated. The return
- value can be used with cpu_register_physical_memory(). (-1) is
- returned if error. */
-static int cpu_register_io_memory_fixed(int io_index,
- CPUReadMemoryFunc * const *mem_read,
- CPUWriteMemoryFunc * const *mem_write,
- void *opaque, enum device_endian endian)
+static void core_log_global_stop(MemoryListener *listener)
{
- int i;
+ cpu_physical_memory_set_dirty_tracking(0);
+}
- if (io_index <= 0) {
- io_index = get_free_io_mem_idx();
- if (io_index == -1)
- return io_index;
- } else {
- io_index >>= IO_MEM_SHIFT;
- if (io_index >= IO_MEM_NB_ENTRIES)
- return -1;
- }
+static void core_eventfd_add(MemoryListener *listener,
+ MemoryRegionSection *section,
+ bool match_data, uint64_t data, int fd)
+{
+}
- for (i = 0; i < 3; ++i) {
- io_mem_read[io_index][i]
- = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
- }
- for (i = 0; i < 3; ++i) {
- io_mem_write[io_index][i]
- = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
- }
- io_mem_opaque[io_index] = opaque;
+static void core_eventfd_del(MemoryListener *listener,
+ MemoryRegionSection *section,
+ bool match_data, uint64_t data, int fd)
+{
+}
- switch (endian) {
- case DEVICE_BIG_ENDIAN:
-#ifndef TARGET_WORDS_BIGENDIAN
- swapendian_init(io_index);
-#endif
- break;
- case DEVICE_LITTLE_ENDIAN:
-#ifdef TARGET_WORDS_BIGENDIAN
- swapendian_init(io_index);
-#endif
- break;
- case DEVICE_NATIVE_ENDIAN:
- default:
- break;
- }
+static void io_begin(MemoryListener *listener)
+{
+}
- return (io_index << IO_MEM_SHIFT);
+static void io_commit(MemoryListener *listener)
+{
}
-int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
- CPUWriteMemoryFunc * const *mem_write,
- void *opaque, enum device_endian endian)
+static void io_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
{
- return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
+    iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
+                 section->offset_within_address_space, section->size);
+    ioport_register(&section->mr->iorange);
}
-void cpu_unregister_io_memory(int io_table_address)
+static void io_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
{
- int i;
- int io_index = io_table_address >> IO_MEM_SHIFT;
+ isa_unassign_ioport(section->offset_within_address_space, section->size);
+}
- swapendian_del(io_index);
+static void io_region_nop(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+}
- for (i=0;i < 3; i++) {
- io_mem_read[io_index][i] = unassigned_mem_read[i];
- io_mem_write[io_index][i] = unassigned_mem_write[i];
- }
- io_mem_opaque[io_index] = NULL;
- io_mem_used[io_index] = 0;
+static void io_log_start(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
}
-static void io_mem_init(void)
+static void io_log_stop(MemoryListener *listener,
+ MemoryRegionSection *section)
{
- int i;
+}
- cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
- unassigned_mem_write, NULL,
- DEVICE_NATIVE_ENDIAN);
- cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
- unassigned_mem_write, NULL,
- DEVICE_NATIVE_ENDIAN);
- cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
- notdirty_mem_write, NULL,
- DEVICE_NATIVE_ENDIAN);
- for (i=0; i<5; i++)
- io_mem_used[i] = 1;
+static void io_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+}
- io_mem_watch = cpu_register_io_memory(watch_mem_read,
- watch_mem_write, NULL,
- DEVICE_NATIVE_ENDIAN);
+static void io_log_global_start(MemoryListener *listener)
+{
}
+static void io_log_global_stop(MemoryListener *listener)
+{
+}
+
+static void io_eventfd_add(MemoryListener *listener,
+ MemoryRegionSection *section,
+ bool match_data, uint64_t data, int fd)
+{
+}
+
+static void io_eventfd_del(MemoryListener *listener,
+ MemoryRegionSection *section,
+ bool match_data, uint64_t data, int fd)
+{
+}
+
+static MemoryListener core_memory_listener = {
+ .begin = core_begin,
+ .commit = core_commit,
+ .region_add = core_region_add,
+ .region_del = core_region_del,
+ .region_nop = core_region_nop,
+ .log_start = core_log_start,
+ .log_stop = core_log_stop,
+ .log_sync = core_log_sync,
+ .log_global_start = core_log_global_start,
+ .log_global_stop = core_log_global_stop,
+ .eventfd_add = core_eventfd_add,
+ .eventfd_del = core_eventfd_del,
+ .priority = 0,
+};
+
+static MemoryListener io_memory_listener = {
+ .begin = io_begin,
+ .commit = io_commit,
+ .region_add = io_region_add,
+ .region_del = io_region_del,
+ .region_nop = io_region_nop,
+ .log_start = io_log_start,
+ .log_stop = io_log_stop,
+ .log_sync = io_log_sync,
+ .log_global_start = io_log_global_start,
+ .log_global_stop = io_log_global_stop,
+ .eventfd_add = io_eventfd_add,
+ .eventfd_del = io_eventfd_del,
+ .priority = 0,
+};
+
static void memory_map_init(void)
{
system_memory = g_malloc(sizeof(*system_memory));
system_io = g_malloc(sizeof(*system_io));
memory_region_init(system_io, "io", 65536);
set_system_io_map(system_io);
+
+ memory_listener_register(&core_memory_listener, system_memory);
+ memory_listener_register(&io_memory_listener, system_io);
}
MemoryRegion *get_system_memory(void)
uint8_t *ptr;
uint32_t val;
target_phys_addr_t page;
- ram_addr_t pd;
- PhysPageDesc *p;
+ MemoryRegionSection section;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
l = (page + TARGET_PAGE_SIZE) - addr;
if (l > len)
l = len;
- p = phys_page_find(page >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(page >> TARGET_PAGE_BITS);
if (is_write) {
- if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
- target_phys_addr_t addr1 = addr;
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (p)
- addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ if (!memory_region_is_ram(section.mr)) {
+ target_phys_addr_t addr1;
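+                /* for I/O regions, mr->ram_addr holds the io_mem table
+                   index assigned by cpu_register_io_memory() */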
+ io_index = memory_region_get_ram_addr(section.mr)
+ & (IO_MEM_NB_ENTRIES - 1);
+ addr1 = (addr & ~TARGET_PAGE_MASK)
+ + section.offset_within_region;
/* XXX: could force cpu_single_env to NULL to avoid
potential bugs */
if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit write access */
val = ldl_p(buf);
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
+ io_mem_write(io_index, addr1, val, 4);
l = 4;
} else if (l >= 2 && ((addr1 & 1) == 0)) {
/* 16 bit write access */
val = lduw_p(buf);
- io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
+ io_mem_write(io_index, addr1, val, 2);
l = 2;
} else {
/* 8 bit write access */
val = ldub_p(buf);
- io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
+ io_mem_write(io_index, addr1, val, 1);
l = 1;
}
- } else {
+ } else if (!section.readonly) {
ram_addr_t addr1;
- addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ addr1 = (memory_region_get_ram_addr(section.mr)
+ + section.offset_within_region)
+ | (addr & ~TARGET_PAGE_MASK);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
qemu_put_ram_ptr(ptr);
}
} else {
- if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
- !(pd & IO_MEM_ROMD)) {
- target_phys_addr_t addr1 = addr;
+            if (!is_ram_rom_romd(&section)) {
+ target_phys_addr_t addr1;
/* I/O case */
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (p)
- addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ io_index = memory_region_get_ram_addr(section.mr)
+ & (IO_MEM_NB_ENTRIES - 1);
+ addr1 = (addr & ~TARGET_PAGE_MASK)
+ + section.offset_within_region;
if (l >= 4 && ((addr1 & 3) == 0)) {
/* 32 bit read access */
- val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
+ val = io_mem_read(io_index, addr1, 4);
stl_p(buf, val);
l = 4;
} else if (l >= 2 && ((addr1 & 1) == 0)) {
/* 16 bit read access */
- val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
+ val = io_mem_read(io_index, addr1, 2);
stw_p(buf, val);
l = 2;
} else {
/* 8 bit read access */
- val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
+ val = io_mem_read(io_index, addr1, 1);
stb_p(buf, val);
l = 1;
}
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
+ ptr = qemu_get_ram_ptr(section.mr->ram_addr
+ + section.offset_within_region);
memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
qemu_put_ram_ptr(ptr);
}
int l;
uint8_t *ptr;
target_phys_addr_t page;
- unsigned long pd;
- PhysPageDesc *p;
+ MemoryRegionSection section;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
l = (page + TARGET_PAGE_SIZE) - addr;
if (l > len)
l = len;
- p = phys_page_find(page >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(page >> TARGET_PAGE_BITS);
- if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
- (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
- !(pd & IO_MEM_ROMD)) {
+        if (!is_ram_rom_romd(&section)) {
/* do nothing */
} else {
unsigned long addr1;
- addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ addr1 = (memory_region_get_ram_addr(section.mr)
+ + section.offset_within_region)
+ + (addr & ~TARGET_PAGE_MASK);
/* ROM/RAM case */
ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
target_phys_addr_t todo = 0;
int l;
target_phys_addr_t page;
- unsigned long pd;
- PhysPageDesc *p;
+ MemoryRegionSection section;
ram_addr_t raddr = RAM_ADDR_MAX;
ram_addr_t rlen;
void *ret;
l = (page + TARGET_PAGE_SIZE) - addr;
if (l > len)
l = len;
- p = phys_page_find(page >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(page >> TARGET_PAGE_BITS);
- if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+ if (!(memory_region_is_ram(section.mr) && !section.readonly)) {
if (todo || bounce.buffer) {
break;
}
return bounce.buffer;
}
if (!todo) {
- raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ raddr = memory_region_get_ram_addr(section.mr)
+ + section.offset_within_region
+ + (addr & ~TARGET_PAGE_MASK);
}
len -= l;
int io_index;
uint8_t *ptr;
uint32_t val;
- unsigned long pd;
- PhysPageDesc *p;
+ MemoryRegionSection section;
- p = phys_page_find(addr >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(addr >> TARGET_PAGE_BITS);
- if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
- !(pd & IO_MEM_ROMD)) {
+    if (!is_ram_rom_romd(&section)) {
/* I/O case */
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (p)
- addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
- val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+ io_index = memory_region_get_ram_addr(section.mr)
+ & (IO_MEM_NB_ENTRIES - 1);
+ addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
+ val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
}
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
+ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
+ & TARGET_PAGE_MASK)
+ + section.offset_within_region) +
(addr & ~TARGET_PAGE_MASK);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
int io_index;
uint8_t *ptr;
uint64_t val;
- unsigned long pd;
- PhysPageDesc *p;
+ MemoryRegionSection section;
- p = phys_page_find(addr >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(addr >> TARGET_PAGE_BITS);
- if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
- !(pd & IO_MEM_ROMD)) {
+ if (!is_ram_rom_romd(&section)) {
/* I/O case */
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (p)
- addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ io_index = memory_region_get_ram_addr(section.mr)
+ & (IO_MEM_NB_ENTRIES - 1);
+ addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
/* XXX This is broken when device endian != cpu endian.
Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
- val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
- val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
+ val = io_mem_read(io_index, addr, 4) << 32;
+ val |= io_mem_read(io_index, addr + 4, 4);
#else
- val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
- val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
+ val = io_mem_read(io_index, addr, 4);
+ val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
- (addr & ~TARGET_PAGE_MASK);
+ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
+ & TARGET_PAGE_MASK)
+ + section.offset_within_region)
+ + (addr & ~TARGET_PAGE_MASK);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
val = ldq_le_p(ptr);
int io_index;
uint8_t *ptr;
uint64_t val;
- unsigned long pd;
- PhysPageDesc *p;
+ MemoryRegionSection section;
- p = phys_page_find(addr >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(addr >> TARGET_PAGE_BITS);
- if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
- !(pd & IO_MEM_ROMD)) {
+ if (!is_ram_rom_romd(&section)) {
/* I/O case */
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (p)
- addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
- val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+ io_index = memory_region_get_ram_addr(section.mr)
+ & (IO_MEM_NB_ENTRIES - 1);
+ addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
+ val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
}
#endif
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
- (addr & ~TARGET_PAGE_MASK);
+ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
+ & TARGET_PAGE_MASK)
+ + section.offset_within_region)
+ + (addr & ~TARGET_PAGE_MASK);
switch (endian) {
case DEVICE_LITTLE_ENDIAN:
val = lduw_le_p(ptr);
{
int io_index;
uint8_t *ptr;
- unsigned long pd;
- PhysPageDesc *p;
+ MemoryRegionSection section;
- p = phys_page_find(addr >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(addr >> TARGET_PAGE_BITS);
- if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (p)
- addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+ if (!memory_region_is_ram(section.mr) || section.readonly) {
+ if (memory_region_is_ram(section.mr)) {
+ io_index = io_mem_rom.ram_addr;
+ } else {
+ io_index = memory_region_get_ram_addr(section.mr)
+ & (IO_MEM_NB_ENTRIES - 1);
+ }
+ addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
+ io_mem_write(io_index, addr, val, 4);
} else {
- unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ unsigned long addr1 = (memory_region_get_ram_addr(section.mr)
+ & TARGET_PAGE_MASK)
+ + section.offset_within_region
+ + (addr & ~TARGET_PAGE_MASK);
ptr = qemu_get_ram_ptr(addr1);
stl_p(ptr, val);
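Note the write-side dispatch above: a readonly RAM section (a ROM) no longer needs a special phys_offset encoding; its writes are steered to io_mem_rom, whose handlers simply discard the access. The selection logic, factored into a hypothetical helper for illustration:

static int rom_aware_write_index(MemoryRegionSection *s)
{
    if (memory_region_is_ram(s->mr)) {
        return io_mem_rom.ram_addr;       /* readonly RAM: drop the write */
    }
    return memory_region_get_ram_addr(s->mr)
        & (IO_MEM_NB_ENTRIES - 1);        /* true MMIO: call the handlers */
}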
{
int io_index;
uint8_t *ptr;
- unsigned long pd;
- PhysPageDesc *p;
+ MemoryRegionSection section;
- p = phys_page_find(addr >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(addr >> TARGET_PAGE_BITS);
- if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (p)
- addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ if (!memory_region_is_ram(section.mr) || section.readonly) {
+ if (memory_region_is_ram(section.mr)) {
+ io_index = io_mem_rom.ram_addr;
+ } else {
+ io_index = memory_region_get_ram_addr(section.mr)
+ & (IO_MEM_NB_ENTRIES - 1);
+ }
+ addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#ifdef TARGET_WORDS_BIGENDIAN
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
+ io_mem_write(io_index, addr, val >> 32, 4);
+ io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
+ io_mem_write(io_index, addr, (uint32_t)val, 4);
+ io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
} else {
- ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
- (addr & ~TARGET_PAGE_MASK);
+ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section.mr)
+ & TARGET_PAGE_MASK)
+ + section.offset_within_region)
+ + (addr & ~TARGET_PAGE_MASK);
stq_p(ptr, val);
}
}
{
int io_index;
uint8_t *ptr;
- unsigned long pd;
- PhysPageDesc *p;
+ MemoryRegionSection section;
- p = phys_page_find(addr >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(addr >> TARGET_PAGE_BITS);
- if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (p)
- addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ if (!memory_region_is_ram(section.mr) || section.readonly) {
+ if (memory_region_is_ram(section.mr)) {
+ io_index = io_mem_rom.ram_addr;
+ } else {
+ io_index = memory_region_get_ram_addr(section.mr)
+ & (IO_MEM_NB_ENTRIES - 1);
+ }
+ addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap32(val);
}
#else
if (endian == DEVICE_BIG_ENDIAN) {
val = bswap32(val);
}
#endif
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+ io_mem_write(io_index, addr, val, 4);
} else {
unsigned long addr1;
- addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
+ + section.offset_within_region
+ + (addr & ~TARGET_PAGE_MASK);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
switch (endian) {
{
int io_index;
uint8_t *ptr;
- unsigned long pd;
- PhysPageDesc *p;
+ MemoryRegionSection section;
- p = phys_page_find(addr >> TARGET_PAGE_BITS);
- if (!p) {
- pd = IO_MEM_UNASSIGNED;
- } else {
- pd = p->phys_offset;
- }
+ section = phys_page_find(addr >> TARGET_PAGE_BITS);
- if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (p)
- addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ if (!memory_region_is_ram(section.mr) || section.readonly) {
+ if (memory_region_is_ram(section.mr)) {
+ io_index = io_mem_rom.ram_addr;
+ } else {
+ io_index = memory_region_get_ram_addr(section.mr)
+ & (IO_MEM_NB_ENTRIES - 1);
+ }
+ addr = (addr & ~TARGET_PAGE_MASK) + section.offset_within_region;
#if defined(TARGET_WORDS_BIGENDIAN)
if (endian == DEVICE_LITTLE_ENDIAN) {
val = bswap16(val);
}
#else
if (endian == DEVICE_BIG_ENDIAN) {
val = bswap16(val);
}
#endif
- io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
+ io_mem_write(io_index, addr, val, 2);
} else {
unsigned long addr1;
- addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ addr1 = (memory_region_get_ram_addr(section.mr) & TARGET_PAGE_MASK)
+ + section.offset_within_region + (addr & ~TARGET_PAGE_MASK);
/* RAM case */
ptr = qemu_get_ram_ptr(addr1);
switch (endian) {
tcg_dump_info(f, cpu_fprintf);
}
+/* NOTE: this function can trigger an exception */
+/* NOTE2: the returned address is not exactly the physical address: it
+ is the offset relative to phys_ram_base */
+tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
+{
+ int mmu_idx, page_index, pd;
+ void *p;
+
+ page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ mmu_idx = cpu_mmu_index(env1);
+ if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
+ (addr & TARGET_PAGE_MASK))) {
+ ldub_code(addr);
+ }
+ pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
+ if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
+ && !io_mem_region[pd]->rom_device) {
+#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
+ cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
+#else
+ cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
+#endif
+ }
+ p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
+ return qemu_ram_addr_from_host_nofail(p);
+}
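get_page_addr_code() uses the TLB entry's addend to turn the guest virtual address into a host pointer, then maps that pointer back into a ram_addr_t. For reference, qemu_ram_addr_from_host_nofail() (defined earlier in this file) is roughly:

ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    /* a host pointer outside guest RAM is a fatal internal error */
    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}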
+
+/*
+ * A helper function for the _utterly broken_ virtio device model to find out if
+ * it's running on a big endian machine. Don't do this at home kids!
+ */
+bool virtio_is_big_endian(void);
+bool virtio_is_big_endian(void)
+{
+#if defined(TARGET_WORDS_BIGENDIAN)
+ return true;
+#else
+ return false;
+#endif
+}
+
#define MMUSUFFIX _cmmu
+#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS