    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
-        if (ent->va_b <= addr && addr <= ent->va_e && ent->entry_valid) {
+        if (ent->va_b <= addr && addr <= ent->va_e) {
            return ent;
        }
    }
    return NULL;
}
+static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
+{
+    CPUState *cs = CPU(hppa_env_get_cpu(env));
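+    /* PA-RISC page sizes grow in powers of 4, so an entry of encoded
+       page_size covers 4**page_size base pages; flush each of them.  */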
+    unsigned i, n = 1 << (2 * ent->page_size);
+    uint64_t addr = ent->va_b;
+
+    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
+        /* Do not flush MMU_PHYS_IDX (index 4); the 0xf mask covers
+           only mmu indexes 0-3, the virtual-mode translations.  */
+        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
+    }
+
+    memset(ent, 0, sizeof(*ent));
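+    /* Leave va_b unmatchable: hppa_find_tlb returns entries regardless
+       of entry_valid, so an all-zero entry must not match vaddr 0.  */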
+    ent->va_b = -1;
+}
+
+static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
+{
+    hppa_tlb_entry *ent;
+    uint32_t i = env->tlb_last;
+
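+    /* Evict in round-robin order: tlb_last cycles through the whole
+       array, so each eviction takes the slot after the previous one.  */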
+    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
+    ent = &env->tlb[i];
+
+    hppa_flush_tlb_ent(env, ent);
+    return ent;
+}
+
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
-    if (ent == NULL) {
+    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
-        ret = (type & PAGE_EXEC ? EXCP_ITLB_MISS : EXCP_DTLB_MISS);
+        /* ??? Unconditionally report data tlb miss,
+           even if this is an instruction fetch.  */
+        ret = EXCP_DTLB_MISS;
        goto egress;
    }
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
+        break;
    }
    /* ??? Check PSW_P and ent->access_prot.  This can remove PAGE_WRITE.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}
+
+/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
+void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+{
+    hppa_tlb_entry *empty = NULL;
+    int i;
+
+    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
+    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
+        hppa_tlb_entry *ent = &env->tlb[i];
+        if (!ent->entry_valid) {
+            empty = ent;
+        } else if (ent->va_b <= addr && addr <= ent->va_e) {
+            hppa_flush_tlb_ent(env, ent);
+            empty = ent;
+        }
+    }
+
+    /* If we didn't see an empty entry, evict one.  */
+    if (empty == NULL) {
+        empty = hppa_alloc_tlb_ent(env);
+    }
+
+    /* Note that empty->entry_valid == 0 already.  */
+    empty->va_b = addr & TARGET_PAGE_MASK;
+    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
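+    /* REG carries the physical page number in bits [24:5]; shift it
+       into a page-aligned physical address.  */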
+    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
+}
+
+/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
+void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
+{
+    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+
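+    /* We expect to find the not-yet-valid entry staged by a preceding
+       ITLBA; anything else means the guest broke the required pairing.  */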
+    if (unlikely(ent == NULL || ent->entry_valid)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
+        return;
+    }
+
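+    /* Unpack the protection word into the entry: access id, then the
+       U (uncacheable), access-rights, B (break), D (dirty) and
+       T (page reference trap) bits of PA 1.1.  */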
+    ent->access_id = extract32(reg, 1, 18);
+    ent->u = extract32(reg, 19, 1);
+    ent->ar_pl2 = extract32(reg, 20, 2);
+    ent->ar_pl1 = extract32(reg, 22, 2);
+    ent->ar_type = extract32(reg, 24, 3);
+    ent->b = extract32(reg, 27, 1);
+    ent->d = extract32(reg, 28, 1);
+    ent->t = extract32(reg, 29, 1);
+    ent->entry_valid = 1;
+}
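+
+/* For reference, the guest sequence these two helpers model looks like
+   (PA 1.1 mnemonics; registers chosen for illustration only):
+       idtlba  r4,(sr1,r5)     ; stage the physical frame for the va
+       idtlbp  r6,(sr1,r5)     ; add protection bits, mark entry valid  */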
+
+/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
+   synchronous across all processors.  */
+static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
+{
+    CPUHPPAState *env = cpu->env_ptr;
+    target_ulong addr = (target_ulong) data.target_ptr;
+    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);
+
+    if (ent && ent->entry_valid) {
+        hppa_flush_tlb_ent(env, ent);
+    }
+}
+
+void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
+{
+    CPUState *src = CPU(hppa_env_get_cpu(env));
+    CPUState *cpu;
+    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
+
+    CPU_FOREACH(cpu) {
+        if (cpu != src) {
+            async_run_on_cpu(cpu, ptlb_work, data);
+        }
+    }
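+    /* Queue the purge on this cpu with the "safe" variant, which runs
+       with all vCPUs quiescent -- this is what makes the purge appear
+       synchronous across processors.  */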
+    async_safe_run_on_cpu(src, ptlb_work, data);
+}
+
+/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
+   number of pages/entries (we choose all), and is local to the cpu.  */
+void HELPER(ptlbe)(CPUHPPAState *env)
+{
+    CPUState *src = CPU(hppa_env_get_cpu(env));
+
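+    /* Forget every architectural entry, then drop all of qemu's cached
+       virtual-mode translations, again leaving MMU_PHYS_IDX intact.  */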
+    memset(env->tlb, 0, sizeof(env->tlb));
+    tlb_flush_by_mmuidx(src, 0xf);
+}
+
+target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
+{
+    hwaddr phys;
+    int prot, excp;
+
+    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
+                                     &phys, &prot);
+    if (excp >= 0) {
+        if (env->psw & PSW_Q) {
+            /* ??? Needs tweaking for hppa64.  */
+            env->cr[CR_IOR] = addr;
+            env->cr[CR_ISR] = addr >> 32;
+        }
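+        /* LPA raises the non-access variant of the data tlb miss,
+           rather than the miss a real load or store would take.  */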
+        if (excp == EXCP_DTLB_MISS) {
+            excp = EXCP_NA_DTLB_MISS;
+        }
+        hppa_dynamic_excp(env, excp, GETPC());
+    }
+    return phys;
+}
+
+/* Return the ar_type of the TLB entry covering VADDR, or -1 if none.  */
+int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
+{
+    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
+    return ent ? ent->ar_type : -1;
+}
#endif /* CONFIG_USER_ONLY */