* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cpu.h"
-#include "helper.h"
+#include "exec/helper-proto.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
# define LOG_SLB(...) do { } while (0)
#endif
+/*
+ * Used to indicate whether we have allocated the hash page table
+ * (htab) in the host kernel.
+ */
+bool kvmppc_kern_htab;
/*
* SLB handling
*/
void helper_slbia(CPUPPCState *env)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
int n, do_invalidate;
do_invalidate = 0;
}
}
if (do_invalidate) {
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
}
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc_slb_t *slb;
slb = slb_lookup(env, addr);
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
}
static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
{
int key, amrbits;
- int prot = PAGE_EXEC;
+ int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
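+    /* Start with all rights; the AMR checks below strip any it denies. */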
/* Only recent MMUs implement Virtual Page Class Key Protection */
if (!(env->mmu_model & POWERPC_MMU_AMR)) {
- return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return prot;
}
key = HPTE64_R_KEY(pte.pte1);
/* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
/* env->spr[SPR_AMR]); */
+ /*
+ * A store is permitted if the AMR bit is 0. Remove write
+ * protection if it is set.
+ */
if (amrbits & 0x2) {
- prot |= PAGE_WRITE;
+ prot &= ~PAGE_WRITE;
}
+ /*
+ * A load is permitted if the AMR bit is 0. Remove read
+ * protection if it is set.
+ */
if (amrbits & 0x1) {
- prot |= PAGE_READ;
+ prot &= ~PAGE_READ;
}
return prot;
}
-static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr pteg_off,
+uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
+{
+ uint64_t token = 0;
+ hwaddr pte_offset;
+
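+    /* Each HPTE occupies HASH_PTE_SIZE_64 bytes; scale the index to a byte offset. */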
+ pte_offset = pte_index * HASH_PTE_SIZE_64;
+ if (kvmppc_kern_htab) {
+ /*
+ * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
+ */
+ token = kvmppc_hash64_read_pteg(cpu, pte_index);
+ if (token) {
+ return token;
+ }
+ /*
+         * The PTEG read failed, even though we allocated the htab via
+         * kvmppc_reset_htab.
+ */
+ return 0;
+ }
+ /*
+ * HTAB is controlled by QEMU. Just point to the internally
+ * accessible PTEG.
+ */
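+    /*
+     * external_htab is a host pointer, while htab_base is a guest
+     * physical address; the token is interpreted accordingly.
+     */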
+ if (cpu->env.external_htab) {
+ token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
+ } else if (cpu->env.htab_base) {
+ token = cpu->env.htab_base + pte_offset;
+ }
+ return token;
+}
+
+void ppc_hash64_stop_access(uint64_t token)
+{
+ if (kvmppc_kern_htab) {
+        kvmppc_hash64_free_pteg(token);
+ }
+}
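+
+/*
+ * Illustration (assumed usage, mirroring ppc_hash64_pteg_search below):
+ * callers bracket their PTE loads with start/stop so the KVM-owned
+ * HTAB case can allocate and free its temporary buffer:
+ *
+ *   token = ppc_hash64_start_access(cpu, pte_index);
+ *   pte0 = ppc_hash64_load_hpte0(env, token, slot);
+ *   pte1 = ppc_hash64_load_hpte1(env, token, slot);
+ *   ppc_hash64_stop_access(token);
+ */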
+
+static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
bool secondary, target_ulong ptem,
ppc_hash_pte64_t *pte)
{
- hwaddr pte_offset = pteg_off;
- target_ulong pte0, pte1;
int i;
+ uint64_t token;
+ target_ulong pte0, pte1;
+ target_ulong pte_index;
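+
+    /* Mask the hash to the table size; each PTEG holds HPTES_PER_GROUP PTEs. */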
+ pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
+ token = ppc_hash64_start_access(ppc_env_get_cpu(env), pte_index);
+ if (!token) {
+ return -1;
+ }
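+    /* Scan every slot in this PTEG, reading each PTE through the token. */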
for (i = 0; i < HPTES_PER_GROUP; i++) {
- pte0 = ppc_hash64_load_hpte0(env, pte_offset);
- pte1 = ppc_hash64_load_hpte1(env, pte_offset);
+ pte0 = ppc_hash64_load_hpte0(env, token, i);
+ pte1 = ppc_hash64_load_hpte1(env, token, i);
if ((pte0 & HPTE64_V_VALID)
&& (secondary == !!(pte0 & HPTE64_V_SECONDARY))
&& HPTE64_V_COMPARE(pte0, ptem)) {
pte->pte0 = pte0;
pte->pte1 = pte1;
- return pte_offset;
+ ppc_hash64_stop_access(token);
+ return (pte_index + i) * HASH_PTE_SIZE_64;
}
-
- pte_offset += HASH_PTE_SIZE_64;
}
-
+ ppc_hash64_stop_access(token);
+    /* We didn't find a valid entry. */
return -1;
}
ppc_slb_t *slb, target_ulong eaddr,
ppc_hash_pte64_t *pte)
{
- hwaddr pteg_off, pte_offset;
+ hwaddr pte_offset;
hwaddr hash;
uint64_t vsid, epnshift, epnmask, epn, ptem;
" vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, vsid, ptem, hash);
- pteg_off = (hash * HASH_PTEG_SIZE_64) & env->htab_mask;
- pte_offset = ppc_hash64_pteg_search(env, pteg_off, 0, ptem, pte);
+ pte_offset = ppc_hash64_pteg_search(env, hash, 0, ptem, pte);
if (pte_offset == -1) {
/* Secondary PTEG lookup */
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ptem, ~hash);
- pteg_off = (~hash * HASH_PTEG_SIZE_64) & env->htab_mask;
- pte_offset = ppc_hash64_pteg_search(env, pteg_off, 1, ptem, pte);
+ pte_offset = ppc_hash64_pteg_search(env, ~hash, 1, ptem, pte);
}
return pte_offset;
return (rpn & ~mask) | (eaddr & mask);
}
-int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr,
+int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
int rwx, int mmu_idx)
{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
hwaddr pte_offset;
ppc_hash_pte64_t pte;
/* Translation is off */
/* In real mode the top 4 effective address bits are ignored */
raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
- tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
TARGET_PAGE_SIZE);
return 0;
if (!slb) {
if (rwx == 2) {
- env->exception_index = POWERPC_EXCP_ISEG;
+ cs->exception_index = POWERPC_EXCP_ISEG;
env->error_code = 0;
} else {
- env->exception_index = POWERPC_EXCP_DSEG;
+ cs->exception_index = POWERPC_EXCP_DSEG;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
}
/* 3. Check for segment level no-execute violation */
if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x10000000;
return 1;
}
pte_offset = ppc_hash64_htab_lookup(env, slb, eaddr, &pte);
if (pte_offset == -1) {
if (rwx == 2) {
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x40000000;
} else {
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
/* Access right violation */
LOG_MMU("PTE access rejected\n");
if (rwx == 2) {
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x08000000;
} else {
target_ulong dsisr = 0;
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (need_prot[rwx] & ~pp_prot) {
}
if (new_pte1 != pte.pte1) {
- ppc_hash64_store_hpte1(env, pte_offset, new_pte1);
+ ppc_hash64_store_hpte(env, pte_offset / HASH_PTE_SIZE_64,
+ pte.pte0, new_pte1);
}
/* 7. Determine the real address from the PTE */
raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);
- tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
}
+
+void ppc_hash64_store_hpte(CPUPPCState *env,
+ target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
+ if (kvmppc_kern_htab) {
+        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
+        return;
+ }
+
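+    /*
+     * From here on pte_index is a byte offset into the table; pte0 and
+     * pte1 are the two doublewords of the HPTE.
+     */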
+ pte_index *= HASH_PTE_SIZE_64;
+ if (env->external_htab) {
+ stq_p(env->external_htab + pte_index, pte0);
+        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
+ } else {
+ stq_phys(cs->as, env->htab_base + pte_index, pte0);
+        stq_phys(cs->as, env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
+ }
+}