int mmu_idx,
void *retaddr);
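+ /* physaddr here is the per-page iotlb value: the I/O handler index is kept
+    in the sub-page bits and the physical page address in the page-aligned
+    bits; the page offset is recovered from the virtual address.  */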
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
- target_ulong tlb_addr)
+ target_ulong addr)
{
DATA_TYPE res;
int index;
- index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
#if SHIFT <= 2
res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
DATA_TYPE res;
int index;
target_ulong tlb_addr;
- target_phys_addr_t physaddr;
+ target_phys_addr_t addend;
void *retaddr;
/* test if there is match for unaligned or IO access */
redo:
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
- res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
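+ /* env->iotlb is indexed like tlb_table and holds the combined physical
+    page address and I/O handler index expected by io_read.  */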
+ addend = env->iotlb[mmu_idx][index];
+ res = glue(io_read, SUFFIX)(addend, addr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
/* slow unaligned access (it spans two pages or IO) */
do_unaligned_access:
do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
#endif
- res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
+ addend = env->tlb_table[mmu_idx][index].addend;
+ res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
}
} else {
/* the page is not in the TLB : fill it */
{
DATA_TYPE res, res1, res2;
int index, shift;
- target_phys_addr_t physaddr;
+ target_phys_addr_t addend;
target_ulong tlb_addr, addr1, addr2;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
- res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
+ addend = env->iotlb[mmu_idx][index];
+ res = glue(io_read, SUFFIX)(addend, addr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* slow unaligned access (it spans two pages) */
res = (DATA_TYPE)res;
} else {
/* unaligned/aligned access in the same page */
- res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)physaddr);
+ addend = env->tlb_table[mmu_idx][index].addend;
+ res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
}
} else {
/* the page is not in the TLB : fill it */
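+ /* io_write takes the same arguments as io_read above: the iotlb value for
+    the page and the virtual address of the access.  */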
static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
DATA_TYPE val,
- target_ulong tlb_addr,
+ target_ulong addr,
void *retaddr)
{
int index;
- index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- env->mem_write_vaddr = tlb_addr;
+ env->mem_write_vaddr = addr;
env->mem_write_pc = (unsigned long)retaddr;
#if SHIFT <= 2
io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
DATA_TYPE val,
int mmu_idx)
{
- target_phys_addr_t physaddr;
+ target_phys_addr_t addend;
target_ulong tlb_addr;
void *retaddr;
int index;
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
retaddr = GETPC();
- glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
+ addend = env->iotlb[mmu_idx][index];
+ glue(io_write, SUFFIX)(addend, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
retaddr = GETPC();
do_unaligned_access(addr, 1, mmu_idx, retaddr);
}
#endif
- glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
+ addend = env->tlb_table[mmu_idx][index].addend;
+ glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
}
} else {
/* the page is not in the TLB : fill it */
int mmu_idx,
void *retaddr)
{
- target_phys_addr_t physaddr;
+ target_phys_addr_t addend;
target_ulong tlb_addr;
int index, i;
redo:
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
- physaddr = addr + env->tlb_table[mmu_idx][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
- glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
+ addend = env->iotlb[mmu_idx][index];
+ glue(io_write, SUFFIX)(addend, val, addr, retaddr);
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* XXX: not efficient, but simple */
}
} else {
/* aligned/unaligned access in the same page */
- glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)physaddr, val);
+ addend = env->tlb_table[mmu_idx][index].addend;
+ glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
}
} else {
/* the page is not in the TLB : fill it */