#endif
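+/* READ_ACCESS_TYPE is the MMUAccessType value passed to tlb_fill() and
+ * cpu_unaligned_access() for read accesses: MMU_INST_FETCH when
+ * SOFTMMU_CODE_ACCESS is defined, MMU_DATA_LOAD otherwise.  Stores pass
+ * MMU_DATA_STORE directly. */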
#ifdef SOFTMMU_CODE_ACCESS
-#define READ_ACCESS_TYPE 2
+#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
-#define READ_ACCESS_TYPE 0
+#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif
# define helper_te_st_name helper_le_st_name
#endif
+/* macro to check the victim tlb */
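+/* The victim TLB (tlb_v_table) holds CPU_VTLB_SIZE entries and is scanned
+ * linearly, so it acts as a small fully associative buffer in front of the
+ * page table walk.  On a hit, the matching entry and its iotlb data are
+ * swapped into the main TLB slot for this address, so the caller can simply
+ * reload tlb_table[mmu_idx][index] afterwards.  (The victim entries are
+ * expected to be populated elsewhere, when a main TLB slot is evicted.) */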
+#define VICTIM_TLB_HIT(ty) \
+({ \
+    /* we are about to do a page table walk. our last hope is the \
+     * victim tlb. try to refill from the victim tlb before walking the \
+     * page table. */ \
+    int vidx; \
+    hwaddr tmpiotlb; \
+    CPUTLBEntry tmptlb; \
+    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) { \
+        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
+            /* found entry in victim tlb, swap tlb and iotlb */ \
+            tmptlb = env->tlb_table[mmu_idx][index]; \
+            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
+            env->tlb_v_table[mmu_idx][vidx] = tmptlb; \
+            tmpiotlb = env->iotlb[mmu_idx][index]; \
+            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx]; \
+            env->iotlb_v[mmu_idx][vidx] = tmpiotlb; \
+            break; \
+        } \
+    } \
+    /* return true when there is a vtlb hit, i.e. vidx >=0 */ \
+    vidx >= 0; \
+})
+
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                 mmu_idx, retaddr);
        }
#endif
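+        /* On a TLB miss, probe the victim TLB first and only fall back to
+         * tlb_fill() (i.e. a page table walk) if that also misses. */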
-        tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                     mmu_idx, retaddr);
+        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }
                                 mmu_idx, retaddr);
        }
#endif
-        tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+        if (!VICTIM_TLB_HIT(ADDR_READ)) {
+            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
+                     mmu_idx, retaddr);
+        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                                 mmu_idx, retaddr);
        }
#endif
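+        /* Same refill strategy as for loads: try the victim TLB before
+         * calling tlb_fill() for a store. */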
-        tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        if (!VICTIM_TLB_HIT(addr_write)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
    /* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
    }
#endif
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                                 mmu_idx, retaddr);
        }
#endif
-        tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        if (!VICTIM_TLB_HIT(addr_write)) {
+            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
+        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
    /* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
-        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
    }
#endif