assert((rwx == 0) || (rwx == 1) || (rwx == 2));
+ /* Note on LPCR usage: 970 uses HID4, but our special variant
+ * of store_spr copies relevant fields into env->spr[SPR_LPCR].
+ * of store_spr copies relevant fields into env->spr[SPR_LPCR].
+ * Similarly we filter unimplemented bits when storing into
+ * LPCR depending on the MMU version. This code can thus just
+ * use the LPCR "as-is".
+ */
+
/* 1. Handle real mode accesses */
if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
- /* Translation is off */
- /* In real mode the top 4 effective address bits are ignored */
+ /* Translation is supposedly "off" */
+ /* In real mode the top 4 effective address bits are (mostly) ignored */
raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
+
+ /* In HV mode, add HRMOR if top EA bit is clear */
+ if (msr_hv || !env->has_hv_mode) {
+ if (!(eaddr >> 63)) {
+ raddr |= env->spr[SPR_HRMOR];
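+ /* The offset is OR-ed in rather than added: HRMOR is
+ * architecturally aligned so that the OR behaves as an add.
+ */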
+ }
+ } else {
+ /* Otherwise, check VPM for RMA vs VRMA */
+ if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+ slb = &env->vrma_slb;
+ if (slb->sps) {
+ goto skip_slb_search;
+ }
+ /* Not much else to do here */
+ cs->exception_index = POWERPC_EXCP_MCHECK;
+ env->error_code = 0;
+ return 1;
+ } else if (raddr < env->rmls) {
+ /* RMA. Check bounds in RMLS */
+ raddr |= env->spr[SPR_RMOR];
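+ /* env->rmls caches the RMA limit decoded from LPCR[RMLS] by
+ * ppc_hash64_update_rmls(); RMOR, like HRMOR, is OR-ed in as
+ * an offset.
+ */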
+ } else {
+ /* The access failed, generate the appropriate interrupt */
+ if (rwx == 2) {
+ ppc_hash64_set_isi(cs, env, 0x08000000);
+ } else {
+ dsisr = 0x08000000;
+ if (rwx == 1) {
+ dsisr |= 0x02000000;
+ }
+ ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
+ }
+ return 1;
+ }
+ }
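+ /* raddr now holds the final real address for the HRMOR and RMA
+ * cases; the VRMA case jumped to skip_slb_search instead.
+ */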
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
TARGET_PAGE_SIZE);
return 0;
}
/* 2. Translation is on, so look up the SLB */
slb = slb_lookup(cpu, eaddr);
-
if (!slb) {
if (rwx == 2) {
cs->exception_index = POWERPC_EXCP_ISEG;
} else {
cs->exception_index = POWERPC_EXCP_DSEG;
env->spr[SPR_DAR] = eaddr;
}
env->error_code = 0;
return 1;
}
+
+skip_slb_search:
+
/* 3. Check for segment level no-execute violation */
if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
ppc_hash64_set_isi(cs, env, 0x10000000);
return 1;
}

hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
- hwaddr pte_offset;
+ hwaddr pte_offset, raddr;
ppc_hash_pte64_t pte;
unsigned apshift;
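+ /* This mirrors the real-mode logic in ppc_hash64_handle_mmu_fault()
+ * above, but returns -1 on failure instead of raising an interrupt.
+ */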
+ /* Handle real mode */
if (msr_dr == 0) {
/* In real mode the top 4 effective address bits are ignored */
- return addr & 0x0FFFFFFFFFFFFFFFULL;
- }
+ raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
- slb = slb_lookup(cpu, addr);
- if (!slb) {
- return -1;
+ /* In HV mode, add HRMOR if top EA bit is clear */
+ if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
+ return raddr | env->spr[SPR_HRMOR];
+ }
+
+ /* Otherwise, check VPM for RMA vs VRMA */
+ if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+ slb = &env->vrma_slb;
+ if (!slb->sps) {
+ return -1;
+ }
+ } else if (raddr < env->rmls) {
+ /* RMA. Check bounds in RMLS */
+ return raddr | env->spr[SPR_RMOR];
+ } else {
+ return -1;
+ }
+ } else {
+ slb = slb_lookup(cpu, addr);
+ if (!slb) {
+ return -1;
+ }
}
pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
tlb_flush(CPU(cpu), 1);
}
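+ /* The helpers below derive cached real-mode state from LPCR:
+ * env->rmls (the RMA limit) and env->vrma_slb (the synthetic SLB
+ * entry used when VPM0 selects VRMA translation). They must be
+ * refreshed whenever LPCR changes.
+ */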
+void ppc_hash64_update_rmls(CPUPPCState *env)
+{
+ uint64_t lpcr = env->spr[SPR_LPCR];
+
+ /*
+ * This is the full 4-bit encoding of POWER8. Previous
+ * CPUs only support a subset of these, but the filtering
+ * is done when writing to LPCR.
+ */
+ switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
+ case 0x8: /* 32MB */
+ env->rmls = 0x2000000ull;
+ break;
+ case 0x3: /* 64MB */
+ env->rmls = 0x4000000ull;
+ break;
+ case 0x7: /* 128MB */
+ env->rmls = 0x8000000ull;
+ break;
+ case 0x4: /* 256MB */
+ env->rmls = 0x10000000ull;
+ break;
+ case 0x2: /* 1GB */
+ env->rmls = 0x40000000ull;
+ break;
+ case 0x1: /* 16GB */
+ env->rmls = 0x400000000ull;
+ break;
+ default:
+ /* Reserved RMLS encoding: no usable RMA */
+ env->rmls = 0;
+ }
+}
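+
+ /* For example, RMLS = 0x2 above gives env->rmls = 1GB: a real-mode
+ * access at raddr 0x3FFFFFFF passes the "raddr < env->rmls" check,
+ * while 0x40000000 takes the 0x08000000 ISI/DSI path in
+ * ppc_hash64_handle_mmu_fault().
+ */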
+
+void ppc_hash64_update_vrma(CPUPPCState *env)
+{
+ const struct ppc_one_seg_page_size *sps = NULL;
+ target_ulong esid, vsid, lpcr;
+ ppc_slb_t *slb = &env->vrma_slb;
+ uint32_t vrmasd;
+ int i;
+
+ /* First clear it */
+ slb->esid = slb->vsid = 0;
+ slb->sps = NULL;
+
+ /* Is VRMA enabled? */
+ lpcr = env->spr[SPR_LPCR];
+ if (!(lpcr & LPCR_VPM0)) {
+ return;
+ }
+
+ /* Make one up. Mostly ignore the ESID, which will not be
+ * needed for translation
+ */
+ vsid = SLB_VSID_VRMA;
+ vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+ vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
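+ /* Shifting VRMASD left by 4 lines it up with the SLB L and LP
+ * bits, so the page-size scan below can match it against the
+ * normal slb_enc encodings.
+ */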
+ esid = SLB_ESID_V;
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
+
+ if (!sps1->page_shift) {
+ break;
+ }
+
+ if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
+ sps = sps1;
+ break;
+ }
+ }
+
+ if (!sps) {
+ error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
+ " vsid 0x"TARGET_FMT_lx, esid, vsid);
+ return;
+ }
+
+ slb->vsid = vsid;
+ slb->esid = esid;
+ slb->sps = sps;
+}
+
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
uint64_t lpcr = 0;
break;
}
env->spr[SPR_LPCR] = lpcr;
+ ppc_hash64_update_rmls(env);
+ ppc_hash64_update_vrma(env);
}
/* Set emulated LPCR to not send interrupts to hypervisor. Note that
* under KVM, the actual HW LPCR will be set differently by KVM itself,
* the settings below ensure proper operations with TCG in absence of
- * a real hypervisor
+ * a real hypervisor.
+ *
+ * Clearing VPM0 will also cause us to use RMOR in mmu-hash64.c for
+ * real mode accesses, which thankfully defaults to 0 and isn't
+ * accessible in guest mode.
*/
lpcr->default_value &= ~(LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV);
lpcr->default_value |= LPCR_LPES0 | LPCR_LPES1;
+ /* Set RMLS to the max (i.e., 16GB) */
+ lpcr->default_value &= ~LPCR_RMLS;
+ lpcr->default_value |= 1ull << LPCR_RMLS_SHIFT;
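+ /* With VPM0 clear and RMOR left at its default of 0, TCG guests
+ * thus get a 16GB RMA starting at real address 0.
+ */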
+
/* P7 and P8 have slightly different PECE bits, mostly because P8 adds
 * bits 47 and 48 which are reserved on P7. Here we set them all, which
 * will work as expected for both implementations
/* Set a full AMOR so guest can use the AMR as it sees fit */
env->spr[SPR_AMOR] = amor->default_value = 0xffffffffffffffffull;
+ /* Update some env bits based on new LPCR value */
+ ppc_hash64_update_rmls(env);
+ ppc_hash64_update_vrma(env);
+
/* Tell KVM that we're in PAPR mode */
if (kvm_enabled()) {
kvmppc_set_papr(cpu);