qemu_host_page_bits++;
qemu_host_page_mask = ~(qemu_host_page_size - 1);
-#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
{
#ifdef HAVE_KINFO_GETVMMAP
struct kinfo_vmentry *freep;
last_brk = (unsigned long)sbrk(0);
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
f = fopen("/compat/linux/proc/self/maps", "r");
-#else
- f = fopen("/proc/self/maps", "r");
-#endif
if (f) {
mmap_lock();
int i;
#if defined(CONFIG_USER_ONLY)
- /* We can't use qemu_malloc because it may recurse into a locked mutex.
- Neither can we record the new pages we reserve while allocating a
- given page because that may recurse into an unallocated page table
- entry. Stuff the allocations we do make into a queue and process
- them after having completed one entire page table allocation. */
-
- unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
- int reserve_idx = 0;
-
+ /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE) \
do { \
P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
- if (h2g_valid(P)) { \
- reserve[reserve_idx] = h2g(P); \
- reserve[reserve_idx + 1] = SIZE; \
- reserve_idx += 2; \
- } \
} while (0)
#else
# define ALLOC(P, SIZE) \
do { P = qemu_mallocz(SIZE); } while (0)
#endif
}
#undef ALLOC
-#if defined(CONFIG_USER_ONLY)
- for (i = 0; i < reserve_idx; i += 2) {
- unsigned long addr = reserve[i];
- unsigned long len = reserve[i + 1];
-
- page_set_flags(addr & TARGET_PAGE_MASK,
- TARGET_PAGE_ALIGN(addr + len),
- PAGE_RESERVED);
- }
-#endif
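
The reserve[] queue removed above existed only to feed this PAGE_RESERVED replay loop after a full page-table allocation; once the flag is no longer recorded, both hunks go away together.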
return pd + (index & (L2_SIZE - 1));
}
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
-static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
+static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
+ __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
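
CODE_GEN_ALIGN is presumably the translation-block alignment from exec-all.h (16 bytes, at least an icache line); without the attribute the static buffer only gets the ABI's default alignment, whereas the mmap()-based allocation path already returns page-aligned memory.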
static void code_gen_alloc(unsigned long tb_size)
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
target_ulong vaddr)
{
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
+ cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
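
For reference, the cpu_physical_memory_*_dirty_flags() accessors this patch switches to are not shown in any hunk. Reconstructed from the open-coded logic they replace (the |= removed here, the byte reads and the mask loop removed below), they presumably look like this sketch:

/* Sketch only: dirty-bitmap accessors as implied by the removed code. */
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS];
}

static inline void cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                       int dirty_flags)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}

static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        int length,
                                                        int dirty_flags)
{
    int i, len = length >> TARGET_PAGE_BITS;
    uint8_t *p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);

    for (i = 0; i < len; i++) {
        p[i] &= ~dirty_flags;
    }
}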
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
{
CPUState *env;
unsigned long length, start1;
- int i, mask, len;
- uint8_t *p;
+ int i;
start &= TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
length = end - start;
if (length == 0)
return;
- len = length >> TARGET_PAGE_BITS;
- mask = ~dirty_flags;
- p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
- for(i = 0; i < len; i++)
- p[i] &= mask;
+ cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
/* we modify the TLB cache so that the dirty bit will be set again
when accessing the range */
unsigned int index;
target_ulong address;
target_ulong code_address;
- target_phys_addr_t addend;
+ unsigned long addend;
CPUTLBEntry *te;
CPUWatchpoint *wp;
target_phys_addr_t iotlb;
assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
+ if (len == 0) {
+ return 0;
+ }
if (start + len - 1 < start) {
/* We've wrapped around. */
return -1;
page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
- unsigned int page_index, prot, pindex;
- PageDesc *p, *p1;
+ unsigned int prot;
+ PageDesc *p;
target_ulong host_start, host_end, addr;
/* Technically this isn't safe inside a signal handler. However we
know this only ever happens in a synchronous SEGV handler, so in
practice it seems to be ok. */
mmap_lock();
- host_start = address & qemu_host_page_mask;
- page_index = host_start >> TARGET_PAGE_BITS;
- p1 = page_find(page_index);
- if (!p1) {
+ p = page_find(address >> TARGET_PAGE_BITS);
+ if (!p) {
mmap_unlock();
return 0;
}
- host_end = host_start + qemu_host_page_size;
- p = p1;
- prot = 0;
- for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
- prot |= p->flags;
- p++;
- }
+
/* if the page was really writable, then we change its
protection back to writable */
- if (prot & PAGE_WRITE_ORG) {
- pindex = (address - host_start) >> TARGET_PAGE_BITS;
- if (!(p1[pindex].flags & PAGE_WRITE)) {
- mprotect((void *)g2h(host_start), qemu_host_page_size,
- (prot & PAGE_BITS) | PAGE_WRITE);
- p1[pindex].flags |= PAGE_WRITE;
+ if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
+ host_start = address & qemu_host_page_mask;
+ host_end = host_start + qemu_host_page_size;
+
+ prot = 0;
+ for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
+ p = page_find(addr >> TARGET_PAGE_BITS);
+ p->flags |= PAGE_WRITE;
+ prot |= p->flags;
+
/* and since the content will be modified, we must invalidate
the corresponding translated code. */
- tb_invalidate_phys_page(address, pc, puc);
+ tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
- tb_invalidate_check(address);
+ tb_invalidate_check(addr);
#endif
- mmap_unlock();
- return 1;
}
+ mprotect((void *)g2h(host_start), qemu_host_page_size,
+ prot & PAGE_BITS);
+
+ mmap_unlock();
+ return 1;
}
mmap_unlock();
return 0;
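
The rewritten page_unprotect() keeps the flags consistent with the mprotect() granularity on hosts whose page size exceeds the target's: every target page inside the faulting host page gets PAGE_WRITE restored and its translations invalidated, rather than only the faulting page as before, and a single mprotect() at the end re-applies the merged protection.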
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
target_phys_addr_t base;
- CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
- CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
- void *opaque[TARGET_PAGE_SIZE][2][4];
- ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
+ ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
+ ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;
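
For scale, assuming 4 KiB target pages and 8-byte pointers: the removed layout carried 4096 x (4 + 4) function-pointer slots plus 4096 x 2 x 4 opaque pointers and as many region offsets, roughly 4096 x 24 x 8 bytes = 768 KiB per subpage, almost all of it duplicating the global io_mem tables. The two flat arrays above cost 2 x 4096 x 8 bytes = 64 KiB, a twelvefold reduction, since an io-mem index is enough to recover the per-width handlers and opaque pointer.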
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
ram_addr_t memory, ram_addr_t region_offset);
-static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
- ram_addr_t orig_memory, ram_addr_t region_offset);
+static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
+ ram_addr_t orig_memory,
+ ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
need_subpage) \
do { \
PhysPageDesc *p;
CPUState *env;
ram_addr_t orig_size = size;
- void *subpage;
+ subpage_t *subpage;
cpu_notify_set_memory(start_addr, size, phys_offset);
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
need_subpage);
- if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
+ if (need_subpage) {
if (!(orig_memory & IO_MEM_SUBPAGE)) {
subpage = subpage_init((addr & TARGET_PAGE_MASK),
&p->phys_offset, orig_memory,
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
end_addr2, need_subpage);
- if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
+ if (need_subpage) {
subpage = subpage_init((addr & TARGET_PAGE_MASK),
&p->phys_offset, IO_MEM_UNASSIGNED,
addr & TARGET_PAGE_MASK);
(typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
- RAMBlock *prev;
RAMBlock *block;
uint8_t *host = ptr;
- prev = NULL;
block = ram_blocks;
while (block && (block->host > host
|| block->host + block->length <= host)) {
- prev = block;
block = block->next;
}
if (!block) {
uint32_t val)
{
int dirty_flags;
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(ram_addr, 1);
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
}
stb_p(qemu_get_ram_ptr(ram_addr), val);
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+ cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
uint32_t val)
{
int dirty_flags;
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(ram_addr, 2);
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
}
stw_p(qemu_get_ram_ptr(ram_addr), val);
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+ cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
uint32_t val)
{
int dirty_flags;
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(ram_addr, 4);
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
}
stl_p(qemu_get_ram_ptr(ram_addr), val);
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+ cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
watch_mem_writel,
};
-static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
- unsigned int len)
+static inline uint32_t subpage_readlen (subpage_t *mmio,
+ target_phys_addr_t addr,
+ unsigned int len)
{
- uint32_t ret;
- unsigned int idx;
-
- idx = SUBPAGE_IDX(addr);
+ unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
mmio, len, addr, idx);
#endif
- ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
- addr + mmio->region_offset[idx][0][len]);
- return ret;
+ addr += mmio->region_offset[idx];
+ idx = mmio->sub_io_index[idx];
+ return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
- uint32_t value, unsigned int len)
+ uint32_t value, unsigned int len)
{
- unsigned int idx;
-
- idx = SUBPAGE_IDX(addr);
+ unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
- printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
- mmio, len, addr, idx, value);
+ printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
+ __func__, mmio, len, addr, idx, value);
#endif
- (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
- addr + mmio->region_offset[idx][1][len],
- value);
+
+ addr += mmio->region_offset[idx];
+ idx = mmio->sub_io_index[idx];
+ io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
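
Note that this also drops a level of indirection: the old tables stored pointers to slots of io_mem_read/io_mem_write and called through (**mmio->mem_read[idx][len])(...), whereas the new tables store the io-mem index itself, so a subpage access is one extra array load on top of the same io_mem_read[idx][len](...) call that a full I/O page takes.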
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
-#if defined(DEBUG_SUBPAGE)
- printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
-#endif
-
return subpage_readlen(opaque, addr, 0);
}
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
uint32_t value)
{
-#if defined(DEBUG_SUBPAGE)
- printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
-#endif
subpage_writelen(opaque, addr, value, 0);
}
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
-#if defined(DEBUG_SUBPAGE)
- printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
-#endif
-
return subpage_readlen(opaque, addr, 1);
}
static void subpage_writew (void *opaque, target_phys_addr_t addr,
uint32_t value)
{
-#if defined(DEBUG_SUBPAGE)
- printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
-#endif
subpage_writelen(opaque, addr, value, 1);
}
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
-#if defined(DEBUG_SUBPAGE)
- printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
-#endif
-
return subpage_readlen(opaque, addr, 2);
}
-static void subpage_writel (void *opaque,
- target_phys_addr_t addr, uint32_t value)
+static void subpage_writel (void *opaque, target_phys_addr_t addr,
+ uint32_t value)
{
-#if defined(DEBUG_SUBPAGE)
- printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
-#endif
subpage_writelen(opaque, addr, value, 2);
}
ram_addr_t memory, ram_addr_t region_offset)
{
int idx, eidx;
- unsigned int i;
if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
return -1;
printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
mmio, start, end, idx, eidx, memory);
#endif
- memory >>= IO_MEM_SHIFT;
+ memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
for (; idx <= eidx; idx++) {
- for (i = 0; i < 4; i++) {
- if (io_mem_read[memory][i]) {
- mmio->mem_read[idx][i] = &io_mem_read[memory][i];
- mmio->opaque[idx][0][i] = io_mem_opaque[memory];
- mmio->region_offset[idx][0][i] = region_offset;
- }
- if (io_mem_write[memory][i]) {
- mmio->mem_write[idx][i] = &io_mem_write[memory][i];
- mmio->opaque[idx][1][i] = io_mem_opaque[memory];
- mmio->region_offset[idx][1][i] = region_offset;
- }
- }
+ mmio->sub_io_index[idx] = memory;
+ mmio->region_offset[idx] = region_offset;
}
return 0;
}
-static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
- ram_addr_t orig_memory, ram_addr_t region_offset)
+static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
+ ram_addr_t orig_memory,
+ ram_addr_t region_offset)
{
subpage_t *mmio;
int subpage_memory;
mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
*phys = subpage_memory | IO_MEM_SUBPAGE;
- subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
- region_offset);
+ subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
return mmio;
}
CPUWriteMemoryFunc * const *mem_write,
void *opaque)
{
- int i, subwidth = 0;
+ int i;
if (io_index <= 0) {
io_index = get_free_io_mem_idx();
return -1;
}
- for(i = 0;i < 3; i++) {
- if (!mem_read[i] || !mem_write[i])
- subwidth = IO_MEM_SUBWIDTH;
- io_mem_read[io_index][i] = mem_read[i];
- io_mem_write[io_index][i] = mem_write[i];
+ for (i = 0; i < 3; ++i) {
+ io_mem_read[io_index][i]
+ = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
+ }
+ for (i = 0; i < 3; ++i) {
+ io_mem_write[io_index][i]
+ = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
}
io_mem_opaque[io_index] = opaque;
- return (io_index << IO_MEM_SHIFT) | subwidth;
+
+ return (io_index << IO_MEM_SHIFT);
}
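
Backfilling the missing widths with the unassigned handlers is what lets the IO_MEM_SUBWIDTH tests disappear from the subpage registration paths above: every io_index now carries a complete three-entry table in each direction, so it is always safe to index io_mem_read[idx][len]. The fallback tables named here are presumably the existing exec.c arrays, shaped like this sketch:

/* Assumed shape of the fallback tables; the handlers raise the usual
   unassigned-access response. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb, unassigned_mem_readw, unassigned_mem_readl,
};
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb, unassigned_mem_writew, unassigned_mem_writel,
};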
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
}
}
} else {
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
}
addr1 += l;
access_len -= l;
return val;
}
-/* XXX: optimize */
+/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
- uint16_t val;
- cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
- return tswap16(val);
+ int io_index;
+ uint8_t *ptr;
+ uint64_t val;
+ unsigned long pd;
+ PhysPageDesc *p;
+
+ p = phys_page_find(addr >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
+ !(pd & IO_MEM_ROMD)) {
+ /* I/O case */
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (p)
+ addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+ } else {
+ /* RAM case */
+ ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
+ (addr & ~TARGET_PAGE_MASK);
+ val = lduw_p(ptr);
+ }
+ return val;
}
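
The I/O-versus-RAM test decodes p->phys_offset; a sketch of the encoding it assumes:

/* Assumed phys_offset encoding behind the test above:
 *   pd & TARGET_PAGE_MASK    ram offset (RAM/ROM case)
 *   pd & ~TARGET_PAGE_MASK   io index << IO_MEM_SHIFT, plus low flag
 *                            bits such as IO_MEM_ROMD and IO_MEM_SUBPAGE
 * IO_MEM_RAM and IO_MEM_ROM take the two lowest indices, so
 * (pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)
 * selects real I/O: neither plain RAM nor ROM, and not a ROM device
 * currently served from its ROM mapping. */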
/* warning: addr must be aligned. The ram page is not masked as dirty
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
}
}
}
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(addr1,
+ (0xff & ~CODE_DIRTY_FLAG));
}
}
}
cpu_physical_memory_write(addr, &v, 1);
}
-/* XXX: optimize */
+/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
- uint16_t v = tswap16(val);
- cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
+ int io_index;
+ uint8_t *ptr;
+ unsigned long pd;
+ PhysPageDesc *p;
+
+ p = phys_page_find(addr >> TARGET_PAGE_BITS);
+ if (!p) {
+ pd = IO_MEM_UNASSIGNED;
+ } else {
+ pd = p->phys_offset;
+ }
+
+ if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (p)
+ addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+ io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
+ } else {
+ unsigned long addr1;
+ addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+ /* RAM case */
+ ptr = qemu_get_ram_ptr(addr1);
+ stw_p(ptr, val);
+ if (!cpu_physical_memory_is_dirty(addr1)) {
+ /* invalidate code */
+ tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
+ /* set dirty bit */
+ cpu_physical_memory_set_dirty_flags(addr1,
+ (0xff & ~CODE_DIRTY_FLAG));
+ }
+ }
}
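
Same shape as the stb_phys/stl_phys neighbours: the old version byte-swapped with tswap16() and funnelled every 16-bit store through the generic cpu_physical_memory_write() path, while the open-coded version dispatches once on phys_offset, stores through stw_p() on the ram pointer, and performs the code invalidation and dirty-flag update inline.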
/* XXX: optimize */