/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    MemoryRegionSection *section;
    /* Track pages larger than TARGET_PAGE_SIZE so that a later
       tlb_flush_page can flush the whole range they cover.  */
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif
    /* The TLB is direct mapped: the low bits of the virtual page number
       select the entry.  The iotlb value is stored biased by -vaddr so
       the slow path can recover the target offset by adding the access
       address back in.  */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && !cpu_physical_memory_is_dirty(
                           section->mr->ram_addr
                           + memory_region_section_addr(section, paddr))) {
            /* Clean RAM: trap the first write so dirty tracking is updated. */
            te->addr_write = address | TLB_NOTDIRTY;
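
/*
 * Illustrative sketch, not part of the original file: a target's MMU
 * fault handler is the usual caller of tlb_set_page().  After walking
 * the guest page tables it maps exactly one TARGET_PAGE_SIZE region for
 * the faulting address.  example_tlb_fill() and target_mmu_translate()
 * are placeholder names; real targets implement this in their
 * tlb_fill()/handle_mmu_fault code, and the exact prototype varies.
 */
#if 0
static void example_tlb_fill(CPUArchState *env, target_ulong addr,
                             int access_type, int mmu_idx)
{
    hwaddr paddr;
    int prot;

    /* Hypothetical guest page-table walk filling in paddr and prot. */
    if (target_mmu_translate(env, addr, access_type, mmu_idx,
                             &paddr, &prot) == 0) {
        tlb_set_page(env, addr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
    }
}
#endif
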
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        /* Code page not in the TLB: refill via the slow path, which may
           raise the guest exception mentioned in the NOTE above.  */
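
/*
 * Illustrative sketch, not part of the original file: the translated-block
 * lookup path is a typical user of get_page_addr_code().  It converts the
 * guest PC into a RAM-offset page address so TBs can be hashed and
 * invalidated by physical page rather than by virtual address.
 * tb_lookup_phys() is a placeholder; tb_phys_hash_func() mirrors the
 * hashing helper the real lookup uses, which lives in the cpu-exec code,
 * not in this file.
 */
#if 0
static TranslationBlock *example_tb_find(CPUArchState *env, target_ulong pc)
{
    /* May refill the TLB or raise a guest exception, as noted above. */
    tb_page_addr_t phys_pc = get_page_addr_code(env, pc);
    unsigned int h = tb_phys_hash_func(phys_pc);

    return tb_lookup_phys(h, phys_pc, pc);
}
#endif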