}
}
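+/* Translate a UA2005 (sun4v) format TTE into the legacy sun4u layout that
+ * the rest of the TLB code works with: keep the PA and the valid bit, move
+ * the page-size field and convert each attribute bit to its sun4u
+ * position. TTEs not marked as sun4v format (or on CPUs without a
+ * hypervisor) are returned unchanged. */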
+static uint64_t sun4v_tte_to_sun4u(CPUSPARCState *env, uint64_t tag,
+ uint64_t sun4v_tte)
+{
+ uint64_t sun4u_tte;
+ if (!(cpu_has_hypervisor(env) && (tag & TLB_UST1_IS_SUN4V_BIT))) {
+ /* is already in the sun4u format */
+ return sun4v_tte;
+ }
+ sun4u_tte = TTE_PA(sun4v_tte) | (sun4v_tte & TTE_VALID_BIT);
+ sun4u_tte |= (sun4v_tte & 3ULL) << 61; /* TTE_PGSIZE */
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_NFO_BIT_UA2005, TTE_NFO_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_USED_BIT_UA2005, TTE_USED_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_W_OK_BIT_UA2005, TTE_W_OK_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_SIDEEFFECT_BIT_UA2005,
+ TTE_SIDEEFFECT_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_PRIV_BIT_UA2005, TTE_PRIV_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_LOCKED_BIT_UA2005, TTE_LOCKED_BIT);
+ return sun4u_tte;
+}
+
static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
uint64_t tlb_tag, uint64_t tlb_tte,
- const char *strmmu, CPUSPARCState *env1)
+ const char *strmmu, CPUSPARCState *env1,
+ uint64_t addr)
{
unsigned int i, replace_used;
+ tlb_tte = sun4v_tte_to_sun4u(env1, addr, tlb_tte);
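+ /* With a hypervisor (sun4v), a new mapping whose context matches and
+ * whose virtual range overlaps an existing entry replaces that entry in
+ * place ("auto demap") instead of taking a free slot. */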
+ if (cpu_has_hypervisor(env1)) {
+ uint64_t new_vaddr = tlb_tag & ~0x1fffULL;
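+ /* TTE page sizes grow by a factor of 8: 8K << (3 * n) = 8K, 64K, 512K, 4M */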
+ uint64_t new_size = 8192ULL << (3 * TTE_PGSIZE(tlb_tte));
+ uint32_t new_ctx = tlb_tag & 0x1fffU;
+ for (i = 0; i < 64; i++) {
+ uint32_t ctx = tlb[i].tag & 0x1fffU;
+ /* check if new mapping overlaps an existing one */
+ if (new_ctx == ctx) {
+ uint64_t vaddr = tlb[i].tag & ~0x1fffULL;
+ uint64_t size = 8192ULL << (3 * TTE_PGSIZE(tlb[i].tte));
+ if (new_vaddr == vaddr
+ || (new_vaddr < vaddr + size
+ && vaddr < new_vaddr + new_size)) {
+ DPRINTF_MMU("auto demap entry [%d] %lx->%lx\n", i, vaddr,
+ new_vaddr);
+ replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+ return;
+ }
+ }
+ }
+ }
/* Try replacing invalid entry */
for (i = 0; i < 64; i++) {
if (!TTE_IS_VALID(tlb[i].tte)) {
case 0x00: /* Leon3 Cache Control */
case 0x08: /* Leon3 Instruction Cache config */
case 0x0C: /* Leon3 Data Cache config */
- if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
+ if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
ret = leon3_cache_control_ld(env, addr, size);
}
break;
case 0x00: /* Leon3 Cache Control */
case 0x08: /* Leon3 Instruction Cache config */
case 0x0C: /* Leon3 Data Cache config */
- if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
+ if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
leon3_cache_control_st(env, addr, val, size);
}
break;
/* Mappings generated during no-fault mode
are invalid in normal mode. */
if ((oldreg ^ env->mmuregs[reg])
- & (MMU_NF | env->def->mmu_bm)) {
+ & (MMU_NF | env->def.mmu_bm)) {
tlb_flush(CPU(cpu));
}
break;
case 1: /* Context Table Pointer Register */
- env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
+ env->mmuregs[reg] = val & env->def.mmu_ctpr_mask;
break;
case 2: /* Context Register */
- env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
+ env->mmuregs[reg] = val & env->def.mmu_cxr_mask;
if (oldreg != env->mmuregs[reg]) {
/* we flush when the MMU context changes because
QEMU has no MMU context support */
case 4: /* Synchronous Fault Address Register */
break;
case 0x10: /* TLB Replacement Control Register */
- env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
+ env->mmuregs[reg] = val & env->def.mmu_trcr_mask;
break;
case 0x13: /* Synchronous Fault Status Register with Read
and Clear */
- env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
+ env->mmuregs[3] = val & env->def.mmu_sfsr_mask;
break;
case 0x14: /* Synchronous Fault Address Register */
env->mmuregs[4] = val;
ret = env->scratch[i];
break;
}
+ case ASI_MMU: /* UA2005 Context ID registers */
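+ /* address bits [4:3] select the register: 1 = primary context,
+ * 2 = secondary context */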
+ switch ((addr >> 3) & 0x3) {
+ case 1:
+ ret = env->dmmu.mmu_primary_context;
+ break;
+ case 2:
+ ret = env->dmmu.mmu_secondary_context;
+ break;
+ default:
+ cpu_unassigned_access(cs, addr, false, false, 1, size);
+ }
+ break;
case ASI_DCACHE_DATA: /* D-cache data */
case ASI_DCACHE_TAG: /* D-cache tag access */
case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
return;
}
case ASI_ITLB_DATA_IN: /* I-MMU data in */
- replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_1bit_lru(env->itlb, env->immu.tag_access,
+ val, "immu", env, addr);
+ }
return;
case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
{
unsigned int i = (addr >> 3) & 0x3f;
- replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
-
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_entry(&env->itlb[i], env->immu.tag_access,
+ sun4v_tte_to_sun4u(env, addr, val), env);
+ }
#ifdef DEBUG_MMU
DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
dump_mmu(stdout, fprintf, env);
return;
}
case ASI_DTLB_DATA_IN: /* D-MMU data in */
- replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
- return;
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access,
+ val, "dmmu", env, addr);
+ }
+ return;
case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
{
unsigned int i = (addr >> 3) & 0x3f;
- replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
-
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access,
+ sun4v_tte_to_sun4u(env, addr, val), env);
+ }
#ifdef DEBUG_MMU
DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
dump_mmu(stdout, fprintf, env);
env->scratch[i] = val;
return;
}
+ case ASI_MMU: /* UA2005 Context ID registers */
+ {
+ switch ((addr >> 3) & 0x3) {
+ case 1:
+ env->dmmu.mmu_primary_context = val;
+ env->immu.mmu_primary_context = val;
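+ /* QEMU's cached translations were looked up under the old context,
+ * so flush the MMU indexes that translate through the primary context */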
+ tlb_flush_by_mmuidx(CPU(cpu),
+ (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
+ break;
+ case 2:
+ env->dmmu.mmu_secondary_context = val;
+ env->immu.mmu_secondary_context = val;
+ tlb_flush_by_mmuidx(CPU(cpu),
+ (1 << MMU_USER_SECONDARY_IDX) |
+ (1 << MMU_KERNEL_SECONDARY_IDX));
+ break;
+ default:
+ cpu_unassigned_access(cs, addr, true, false, 1, size);
+ }
+ }
+ return;
case ASI_QUEUE: /* UA2005 CPU mondo queue */
case ASI_DCACHE_DATA: /* D-cache data */
case ASI_DCACHE_TAG: /* D-cache tag access */