X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/7df9671989c1cfa693764f9ae6349324b2ada02a..36a43ea83b4426344592523fd560e5f76c4867c4:/cputlb.c

diff --git a/cputlb.c b/cputlb.c
index 4bc6c24e11..2f7a166491 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -17,7 +17,7 @@
  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#include "config.h"
+#include "qemu/osdep.h"
 #include "cpu.h"
 #include "exec/exec-all.h"
 #include "exec/memory.h"
@@ -262,27 +262,24 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
     return ram_addr;
 }
 
-void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
+void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
 {
-    CPUState *cpu;
     CPUArchState *env;
 
-    CPU_FOREACH(cpu) {
-        int mmu_idx;
+    int mmu_idx;
 
-        env = cpu->env_ptr;
-        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
-            unsigned int i;
+    env = cpu->env_ptr;
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+        unsigned int i;
 
-            for (i = 0; i < CPU_TLB_SIZE; i++) {
-                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
-                                      start1, length);
-            }
+        for (i = 0; i < CPU_TLB_SIZE; i++) {
+            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
+                                  start1, length);
+        }
 
-            for (i = 0; i < CPU_VTLB_SIZE; i++) {
-                tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
-                                      start1, length);
-            }
+        for (i = 0; i < CPU_VTLB_SIZE; i++) {
+            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
+                                  start1, length);
         }
     }
 }
@@ -296,8 +293,9 @@ static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
 
 /* update the TLB corresponding to virtual page vaddr
    so that it is no longer dirty */
-void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
+void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
 {
+    CPUArchState *env = cpu->env_ptr;
     int i;
     int mmu_idx;
 
@@ -358,6 +356,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     CPUTLBEntry *te;
     hwaddr iotlb, xlat, sz;
     unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
+    int asidx = cpu_asidx_from_attrs(cpu, attrs);
 
     assert(size >= TARGET_PAGE_SIZE);
     if (size != TARGET_PAGE_SIZE) {
@@ -365,7 +364,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
     }
 
     sz = size;
-    section = address_space_translate_for_iotlb(cpu, paddr, &xlat, &sz);
+    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
     assert(sz >= TARGET_PAGE_SIZE);
 
 #if defined(DEBUG_TLB)
@@ -417,8 +416,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         /* Write access calls the I/O callback.  */
         te->addr_write = address | TLB_MMIO;
     } else if (memory_region_is_ram(section->mr)
-               && cpu_physical_memory_is_clean(section->mr->ram_addr
-                                               + xlat)) {
+               && cpu_physical_memory_is_clean(
+                    memory_region_get_ram_addr(section->mr) + xlat)) {
         te->addr_write = address | TLB_NOTDIRTY;
     } else {
         te->addr_write = address;
@@ -450,15 +449,17 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
     void *p;
     MemoryRegion *mr;
     CPUState *cpu = ENV_GET_CPU(env1);
+    CPUIOTLBEntry *iotlbentry;
 
     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    mmu_idx = cpu_mmu_index(env1);
+    mmu_idx = cpu_mmu_index(env1, true);
     if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                  (addr & TARGET_PAGE_MASK))) {
         cpu_ldub_code(env1, addr);
     }
-    pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK;
-    mr = iotlb_to_region(cpu, pd);
+    iotlbentry = &env1->iotlb[mmu_idx][page_index];
+    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
+    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
     if (memory_region_is_unassigned(mr)) {
         CPUClass *cc = CPU_GET_CLASS(cpu);
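
Note: with this change, tlb_reset_dirty() flushes the dirty state of a single
vCPU instead of looping over all CPUs internally, so the CPU_FOREACH iteration
has to move to the call site. A minimal sketch of such a caller, assuming a
helper in the style of exec.c (the caller side is not part of this diff, and
the function name here is illustrative):

    /* Sketch: iterate CPUs now that the loop is gone from tlb_reset_dirty().
     * CPU_FOREACH and tlb_reset_dirty() are the symbols from this diff; the
     * helper's name and placement are an assumption, not shown in this hunk. */
    static void tlb_reset_dirty_range_all(ram_addr_t start1, ram_addr_t length)
    {
        CPUState *cpu;

        CPU_FOREACH(cpu) {
            tlb_reset_dirty(cpu, start1, length);
        }
    }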