#include "qemu/osdep.h"
#include "qemu/log.h"
+#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
-#include "tcg-op.h"
+#include "tcg/tcg-op.h"
#include "trace.h"
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
+ target_ulong irqs;
+
target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
- target_ulong pending = atomic_read(&env->mip) & env->mie;
- target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
- target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
- target_ulong irqs = (pending & ~env->mideleg & -mie) |
- (pending & env->mideleg & -sie);
+ target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE);
+
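+ /*
+  * The VS-level interrupts (VSSIP/VSTIP/VSEIP) are split out of the
+  * ordinary pending set: they can only be taken while V=1.
+  */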
+ target_ulong pending = env->mip & env->mie &
+ ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
+ target_ulong vspending = (env->mip & env->mie &
+ (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP));
+
+ target_ulong mie = env->priv < PRV_M ||
+ (env->priv == PRV_M && mstatus_mie);
+ target_ulong sie = env->priv < PRV_S ||
+ (env->priv == PRV_S && mstatus_sie);
+ target_ulong hs_sie = env->priv < PRV_S ||
+ (env->priv == PRV_S && hs_mstatus_sie);
+
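+ /*
+  * With V=1, a pending HS-level interrupt pre-empts the guest and must
+  * trap as an HS-level exception; otherwise only the VS-level
+  * interrupts remain candidates.
+  */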
+ if (riscv_cpu_virt_enabled(env)) {
+ target_ulong pending_hs_irq = pending & -hs_sie;
+
+ if (pending_hs_irq) {
+ riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP);
+ return ctz64(pending_hs_irq);
+ }
+
+ pending = vspending;
+ }
+
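+ /* Negating the 0/1 enable flags yields all-zeros/all-ones masks. */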
+ irqs = (pending & ~env->mideleg & -mie) | (pending & env->mideleg & -sie);
if (irqs) {
return ctz64(irqs); /* since non-zero */
#if !defined(CONFIG_USER_ONLY)
-int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
+/* Return true if floating point support is currently enabled */
+bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
- CPURISCVState *env = &cpu->env;
- if (env->miclaim & interrupts) {
- return -1;
+ if (env->mstatus & MSTATUS_FS) {
+ if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+}
+
+void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
+{
+ target_ulong mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
+ MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE;
+ bool current_virt = riscv_cpu_virt_enabled(env);
+
+ g_assert(riscv_has_ext(env, RVH));
+
+#if defined(TARGET_RISCV64)
+ mstatus_mask |= MSTATUS64_UXL;
+#endif
+
+ if (current_virt) {
+ /* Current V=1 and we are about to change to V=0 */
+ env->vsstatus = env->mstatus & mstatus_mask;
+ env->mstatus &= ~mstatus_mask;
+ env->mstatus |= env->mstatus_hs;
+
+#if defined(TARGET_RISCV32)
+ env->vsstatush = env->mstatush;
+ env->mstatush |= env->mstatush_hs;
+#endif
+
+ env->vstvec = env->stvec;
+ env->stvec = env->stvec_hs;
+
+ env->vsscratch = env->sscratch;
+ env->sscratch = env->sscratch_hs;
+
+ env->vsepc = env->sepc;
+ env->sepc = env->sepc_hs;
+
+ env->vscause = env->scause;
+ env->scause = env->scause_hs;
+
+ env->vstval = env->sbadaddr;
+ env->sbadaddr = env->stval_hs;
+
+ env->vsatp = env->satp;
+ env->satp = env->satp_hs;
} else {
- env->miclaim |= interrupts;
- return 0;
+ /* Current V=0 and we are about to change to V=1 */
+ env->mstatus_hs = env->mstatus & mstatus_mask;
+ env->mstatus &= ~mstatus_mask;
+ env->mstatus |= env->vsstatus;
+
+#if defined(TARGET_RISCV32)
+ env->mstatush_hs = env->mstatush;
+ env->mstatush |= env->vsstatush;
+#endif
+
+ env->stvec_hs = env->stvec;
+ env->stvec = env->vstvec;
+
+ env->sscratch_hs = env->sscratch;
+ env->sscratch = env->vsscratch;
+
+ env->sepc_hs = env->sepc;
+ env->sepc = env->vsepc;
+
+ env->scause_hs = env->scause;
+ env->scause = env->vscause;
+
+ env->stval_hs = env->sbadaddr;
+ env->sbadaddr = env->vstval;
+
+ env->satp_hs = env->satp;
+ env->satp = env->vsatp;
}
}
-struct CpuAsyncInfo {
- uint32_t new_mip;
-};
+bool riscv_cpu_virt_enabled(CPURISCVState *env)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return false;
+ }
-static void riscv_cpu_update_mip_irqs_async(CPUState *target_cpu_state,
- run_on_cpu_data data)
+ return get_field(env->virt, VIRT_ONOFF);
+}
+
+void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
- struct CpuAsyncInfo *info = (struct CpuAsyncInfo *) data.host_ptr;
+ if (!riscv_has_ext(env, RVH)) {
+ return;
+ }
- if (info->new_mip) {
- cpu_interrupt(target_cpu_state, CPU_INTERRUPT_HARD);
- } else {
- cpu_reset_interrupt(target_cpu_state, CPU_INTERRUPT_HARD);
+ /* Flush the TLB on all virt mode changes. */
+ if (get_field(env->virt, VIRT_ONOFF) != enable) {
+ tlb_flush(env_cpu(env));
+ }
+
+ env->virt = set_field(env->virt, VIRT_ONOFF, enable);
+}
+
+bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return false;
+ }
+
+ return get_field(env->virt, FORCE_HS_EXCEP);
+}
+
+void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return;
}
- g_free(info);
+ env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable);
+}
+
+int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
+{
+ CPURISCVState *env = &cpu->env;
+ if (env->miclaim & interrupts) {
+ return -1;
+ } else {
+ env->miclaim |= interrupts;
+ return 0;
+ }
}
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
CPURISCVState *env = &cpu->env;
CPUState *cs = CPU(cpu);
- struct CpuAsyncInfo *info;
- uint32_t old, new, cmp = atomic_read(&env->mip);
+ uint32_t old = env->mip;
+ bool locked = false;
- do {
- old = cmp;
- new = (old & ~mask) | (value & mask);
- cmp = atomic_cmpxchg(&env->mip, old, new);
- } while (old != cmp);
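+ /*
+  * cpu_interrupt() and cpu_reset_interrupt() must be called with the
+  * iothread (BQL) lock held, so take it here if we don't have it yet.
+  */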
+ if (!qemu_mutex_iothread_locked()) {
+ locked = true;
+ qemu_mutex_lock_iothread();
+ }
- info = g_new(struct CpuAsyncInfo, 1);
- info->new_mip = new;
+ env->mip = (env->mip & ~mask) | (value & mask);
- async_run_on_cpu(cs, riscv_cpu_update_mip_irqs_async,
- RUN_ON_CPU_HOST_PTR(info));
+ if (env->mip) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+
+ if (locked) {
+ qemu_mutex_unlock_iothread();
+ }
return old;
}
+void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void))
+{
+ env->rdtime_fn = fn;
+}
+
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
if (newpriv > PRV_M) {
*
* Adapted from Spike's mmu_t::translate and mmu_t::walk
*
+ * @env: CPURISCVState
+ * @physical: This will be set to the calculated physical address
+ * @prot: The returned protection attributes
+ * @addr: The virtual address to be translated
+ * @access_type: The type of MMU access
+ * @mmu_idx: Indicates current privilege level
+ * @first_stage: Are we in first stage translation?
+ * Second stage is used for hypervisor guest translation
+ * @two_stage: Are we going to perform two stage translation?
*/
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
int *prot, target_ulong addr,
- int access_type, int mmu_idx)
+ int access_type, int mmu_idx,
+ bool first_stage, bool two_stage)
{
/* NOTE: the env->pc value visible here will not be
* correct, but the value visible to the exception handler
* (riscv_cpu_do_interrupt) is correct */
-
+ MemTxResult res;
+ MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int mode = mmu_idx;
+ bool use_background = false;
+ /*
+ * Check if we should use the background registers for the two
+ * stage translation. We don't need to check if we actually need
+ * two stage translation as that happened before this function
+ * was called. Background registers will be used if the guest has
+ * forced a two stage translation to be on (in HS or M mode).
+ */
if (mode == PRV_M && access_type != MMU_INST_FETCH) {
if (get_field(env->mstatus, MSTATUS_MPRV)) {
mode = get_field(env->mstatus, MSTATUS_MPP);
+
+ if (riscv_has_ext(env, RVH) &&
+ MSTATUS_MPV_ISSET(env)) {
+ use_background = true;
+ }
}
}
+ if (mode == PRV_S && access_type != MMU_INST_FETCH &&
+ riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) {
+ if (get_field(env->hstatus, HSTATUS_SPRV)) {
+ mode = get_field(env->mstatus, SSTATUS_SPP);
+ use_background = true;
+ }
+ }
+
+ if (first_stage == false) {
+ /* We are in stage 2 translation, this is similar to stage 1. */
+ /* Stage 2 is always taken as U-mode */
+ mode = PRV_U;
+ }
+
if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
*physical = addr;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
*prot = 0;
- target_ulong base;
- int levels, ptidxbits, ptesize, vm, sum;
- int mxr = get_field(env->mstatus, MSTATUS_MXR);
-
- if (env->priv_ver >= PRIV_VERSION_1_10_0) {
- base = get_field(env->satp, SATP_PPN) << PGSHIFT;
- sum = get_field(env->mstatus, MSTATUS_SUM);
- vm = get_field(env->satp, SATP_MODE);
- switch (vm) {
- case VM_1_10_SV32:
- levels = 2; ptidxbits = 10; ptesize = 4; break;
- case VM_1_10_SV39:
- levels = 3; ptidxbits = 9; ptesize = 8; break;
- case VM_1_10_SV48:
- levels = 4; ptidxbits = 9; ptesize = 8; break;
- case VM_1_10_SV57:
- levels = 5; ptidxbits = 9; ptesize = 8; break;
- case VM_1_10_MBARE:
- *physical = addr;
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return TRANSLATE_SUCCESS;
- default:
- g_assert_not_reached();
- }
+ hwaddr base;
+ int levels, ptidxbits, ptesize, vm, sum, mxr, widened;
+
+ if (first_stage == true) {
+ mxr = get_field(env->mstatus, MSTATUS_MXR);
} else {
- base = env->sptbr << PGSHIFT;
- sum = !get_field(env->mstatus, MSTATUS_PUM);
- vm = get_field(env->mstatus, MSTATUS_VM);
- switch (vm) {
- case VM_1_09_SV32:
- levels = 2; ptidxbits = 10; ptesize = 4; break;
- case VM_1_09_SV39:
- levels = 3; ptidxbits = 9; ptesize = 8; break;
- case VM_1_09_SV48:
- levels = 4; ptidxbits = 9; ptesize = 8; break;
- case VM_1_09_MBARE:
- *physical = addr;
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return TRANSLATE_SUCCESS;
- default:
- g_assert_not_reached();
+ mxr = get_field(env->vsstatus, MSTATUS_MXR);
+ }
+
+ if (first_stage == true) {
+ if (use_background) {
+ base = (hwaddr)get_field(env->vsatp, SATP_PPN) << PGSHIFT;
+ vm = get_field(env->vsatp, SATP_MODE);
+ } else {
+ base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT;
+ vm = get_field(env->satp, SATP_MODE);
}
+ widened = 0;
+ } else {
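+ /* Stage 2 walks the hgatp-based tables over a widened GPA space. */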
+ base = (hwaddr)get_field(env->hgatp, HGATP_PPN) << PGSHIFT;
+ vm = get_field(env->hgatp, HGATP_MODE);
+ widened = 2;
+ }
+ sum = get_field(env->mstatus, MSTATUS_SUM);
+ switch (vm) {
+ case VM_1_10_SV32:
+ levels = 2; ptidxbits = 10; ptesize = 4; break;
+ case VM_1_10_SV39:
+ levels = 3; ptidxbits = 9; ptesize = 8; break;
+ case VM_1_10_SV48:
+ levels = 4; ptidxbits = 9; ptesize = 8; break;
+ case VM_1_10_SV57:
+ levels = 5; ptidxbits = 9; ptesize = 8; break;
+ case VM_1_10_MBARE:
+ *physical = addr;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TRANSLATE_SUCCESS;
+ default:
+ g_assert_not_reached();
}
CPUState *cs = env_cpu(env);
- int va_bits = PGSHIFT + levels * ptidxbits;
- target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
- target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
+ int va_bits = PGSHIFT + levels * ptidxbits + widened;
+ target_ulong mask, masked_msbs;
+
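+ /*
+  * Guard the shift below: shifting by TARGET_LONG_BITS or more is
+  * undefined in C, and a widened stage-2 walk (e.g. Sv32x4 on a
+  * 32-bit target) can make va_bits - 1 reach TARGET_LONG_BITS.
+  */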
+ if (TARGET_LONG_BITS > (va_bits - 1)) {
+ mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
+ } else {
+ mask = 0;
+ }
+ masked_msbs = (addr >> (va_bits - 1)) & mask;
+
if (masked_msbs != 0 && masked_msbs != mask) {
return TRANSLATE_FAIL;
}
restart:
#endif
for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
- target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
+ target_ulong idx;
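+ /*
+  * The root page table of a stage-2 walk is 2 bits wider, e.g. a
+  * 16 KiB root table for Sv32x4/Sv39x4.
+  */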
+ if (i == 0) {
+ idx = (addr >> (PGSHIFT + ptshift)) &
+ ((1 << (ptidxbits + widened)) - 1);
+ } else {
+ idx = (addr >> (PGSHIFT + ptshift)) &
((1 << ptidxbits) - 1);
+ }
/* check that physical address of PTE is legal */
- target_ulong pte_addr = base + idx * ptesize;
+ hwaddr pte_addr;
+
+ if (two_stage && first_stage) {
+ int vbase_prot;
+ hwaddr vbase;
+
+ /* Do the second stage translation on the base PTE address. */
+ get_physical_address(env, &vbase, &vbase_prot, base, MMU_DATA_LOAD,
+ mmu_idx, false, true);
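+ /* Note: the result of this nested stage-2 walk is not checked here. */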
+
+ pte_addr = vbase + idx * ptesize;
+ } else {
+ pte_addr = base + idx * ptesize;
+ }
if (riscv_feature(env, RISCV_FEATURE_PMP) &&
!pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong),
1 << MMU_DATA_LOAD, PRV_S)) {
return TRANSLATE_PMP_FAIL;
}
+
#if defined(TARGET_RISCV32)
- target_ulong pte = ldl_phys(cs->as, pte_addr);
+ target_ulong pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
#elif defined(TARGET_RISCV64)
- target_ulong pte = ldq_phys(cs->as, pte_addr);
+ target_ulong pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
#endif
- target_ulong ppn = pte >> PTE_PPN_SHIFT;
+ if (res != MEMTX_OK) {
+ return TRANSLATE_FAIL;
+ }
+
+ hwaddr ppn = pte >> PTE_PPN_SHIFT;
if (!(pte & PTE_V)) {
/* Invalid PTE */
}
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
- MMUAccessType access_type, bool pmp_violation)
+ MMUAccessType access_type, bool pmp_violation,
+ bool first_stage)
{
CPUState *cs = env_cpu(env);
- int page_fault_exceptions =
- (env->priv_ver >= PRIV_VERSION_1_10_0) &&
- get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
- !pmp_violation;
+ int page_fault_exceptions;
+ if (first_stage) {
+ page_fault_exceptions =
+ get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
+ !pmp_violation;
+ } else {
+ page_fault_exceptions =
+ get_field(env->hgatp, HGATP_MODE) != VM_1_10_MBARE &&
+ !pmp_violation;
+ }
switch (access_type) {
case MMU_INST_FETCH:
- cs->exception_index = page_fault_exceptions ?
- RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
+ if (riscv_cpu_virt_enabled(env) && !first_stage) {
+ cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
+ } else {
+ cs->exception_index = page_fault_exceptions ?
+ RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
+ }
break;
case MMU_DATA_LOAD:
- cs->exception_index = page_fault_exceptions ?
- RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
+ if (riscv_cpu_virt_enabled(env) && !first_stage) {
+ cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
+ } else {
+ cs->exception_index = page_fault_exceptions ?
+ RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
+ }
break;
case MMU_DATA_STORE:
- cs->exception_index = page_fault_exceptions ?
- RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ if (riscv_cpu_virt_enabled(env) && !first_stage) {
+ cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
+ } else {
+ cs->exception_index = page_fault_exceptions ?
+ RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ }
break;
default:
g_assert_not_reached();
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
hwaddr phys_addr;
int prot;
int mmu_idx = cpu_mmu_index(&cpu->env, false);
- if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
+ if (get_physical_address(env, &phys_addr, &prot, addr, 0, mmu_idx,
+ true, riscv_cpu_virt_enabled(env))) {
return -1;
}
+
+ if (riscv_cpu_virt_enabled(env)) {
+ if (get_physical_address(env, &phys_addr, &prot, phys_addr,
+ 0, mmu_idx, false, true)) {
+ return -1;
+ }
+ }
+
return phys_addr;
}
-void riscv_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write,
- bool is_exec, int unused, unsigned size)
+void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- if (is_write) {
+ if (access_type == MMU_DATA_STORE) {
cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
} else {
cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
}
env->badaddr = addr;
- riscv_raise_exception(&cpu->env, cs->exception_index, GETPC());
+ riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
-#ifndef CONFIG_USER_ONLY
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
+#ifndef CONFIG_USER_ONLY
+ vaddr im_address;
hwaddr pa = 0;
- int prot;
+ int prot, prot2;
bool pmp_violation = false;
+ bool m_mode_two_stage = false;
+ bool hs_mode_two_stage = false;
+ bool first_stage_error = true;
int ret = TRANSLATE_FAIL;
int mode = mmu_idx;
+ env->guest_phys_fault_addr = 0;
+
qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, address, access_type, mmu_idx);
- ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx);
+ /*
+ * Determine if we are in M mode and MPRV is set or in HS mode and SPRV is
+ * set and we want to access a virtualisation address.
+ */
+ if (riscv_has_ext(env, RVH)) {
+ m_mode_two_stage = env->priv == PRV_M &&
+ access_type != MMU_INST_FETCH &&
+ get_field(env->mstatus, MSTATUS_MPRV) &&
+ MSTATUS_MPV_ISSET(env);
+
+ hs_mode_two_stage = env->priv == PRV_S &&
+ !riscv_cpu_virt_enabled(env) &&
+ access_type != MMU_INST_FETCH &&
+ get_field(env->hstatus, HSTATUS_SPRV) &&
+ get_field(env->hstatus, HSTATUS_SPV);
+ }
if (mode == PRV_M && access_type != MMU_INST_FETCH) {
if (get_field(env->mstatus, MSTATUS_MPRV)) {
}
}
- qemu_log_mask(CPU_LOG_MMU,
- "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
- " prot %d\n", __func__, address, ret, pa, prot);
+ if (riscv_cpu_virt_enabled(env) || m_mode_two_stage || hs_mode_two_stage) {
+ /* Two stage lookup */
+ ret = get_physical_address(env, &pa, &prot, address, access_type,
+ mmu_idx, true, true);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, address, ret, pa, prot);
+
+ if (ret != TRANSLATE_FAIL) {
+ /* Second stage lookup */
+ im_address = pa;
+
+ ret = get_physical_address(env, &pa, &prot2, im_address,
+ access_type, mmu_idx, false, true);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, im_address, ret, pa, prot2);
+
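+ /* The effective permissions are the intersection of both stages. */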
+ prot &= prot2;
+
+ if (riscv_feature(env, RISCV_FEATURE_PMP) &&
+ (ret == TRANSLATE_SUCCESS) &&
+ !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
+ ret = TRANSLATE_PMP_FAIL;
+ }
+
+ if (ret != TRANSLATE_SUCCESS) {
+ /*
+ * Guest physical address translation failed, so this is an
+ * HS-level exception.
+ */
+ first_stage_error = false;
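+ /*
+  * htval/mtval2 report the faulting guest-physical address shifted
+  * right by 2 bits.
+  */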
+ env->guest_phys_fault_addr = (im_address |
+ (address &
+ (TARGET_PAGE_SIZE - 1))) >> 2;
+ }
+ }
+ } else {
+ /* Single stage lookup */
+ ret = get_physical_address(env, &pa, &prot, address, access_type,
+ mmu_idx, true, false);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s address=%" VADDR_PRIx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, address, ret, pa, prot);
+ }
if (riscv_feature(env, RISCV_FEATURE_PMP) &&
(ret == TRANSLATE_SUCCESS) &&
if (ret == TRANSLATE_PMP_FAIL) {
pmp_violation = true;
}
+
if (ret == TRANSLATE_SUCCESS) {
tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
} else if (probe) {
return false;
} else {
- raise_mmu_exception(env, address, access_type, pmp_violation);
+ raise_mmu_exception(env, address, access_type, pmp_violation,
+                     first_stage_error);
riscv_raise_exception(env, cs->exception_index, retaddr);
}
+
+ return true;
+
#else
switch (access_type) {
case MMU_INST_FETCH:
case MMU_DATA_STORE:
cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
break;
+ default:
+ g_assert_not_reached();
}
+ env->badaddr = address;
cpu_loop_exit_restore(cs, retaddr);
#endif
}
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
+ bool force_hs_excep = riscv_cpu_force_hs_excep_enabled(env);
+ target_ulong s;
/* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
* so we mask off the MSB and separate into trap type and cause.
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
target_ulong deleg = async ? env->mideleg : env->medeleg;
target_ulong tval = 0;
-
- static const int ecall_cause_map[] = {
- [PRV_U] = RISCV_EXCP_U_ECALL,
- [PRV_S] = RISCV_EXCP_S_ECALL,
- [PRV_H] = RISCV_EXCP_H_ECALL,
- [PRV_M] = RISCV_EXCP_M_ECALL
- };
+ target_ulong htval = 0;
+ target_ulong mtval2 = 0;
if (!async) {
/* set tval to badaddr for traps with address information */
switch (cause) {
+ case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
+ case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
+ case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
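+ /* A guest-page fault must be handled in HS mode, never in VS mode. */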
+ force_hs_excep = true;
+ /* fallthrough */
case RISCV_EXCP_INST_ADDR_MIS:
case RISCV_EXCP_INST_ACCESS_FAULT:
case RISCV_EXCP_LOAD_ADDR_MIS:
/* ecall is dispatched as one cause so translate based on mode */
if (cause == RISCV_EXCP_U_ECALL) {
assert(env->priv <= 3);
- cause = ecall_cause_map[env->priv];
+
+ if (env->priv == PRV_M) {
+ cause = RISCV_EXCP_M_ECALL;
+ } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
+ cause = RISCV_EXCP_VS_ECALL;
+ } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
+ cause = RISCV_EXCP_S_ECALL;
+ } else if (env->priv == PRV_U) {
+ cause = RISCV_EXCP_U_ECALL;
+ }
}
}
- trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 16 ?
+ trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 23 ?
(async ? riscv_intr_names : riscv_excp_names)[cause] : "(unknown)");
if (env->priv <= PRV_S &&
cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
/* handle the trap in S-mode */
- target_ulong s = env->mstatus;
- s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
- get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
+ if (riscv_has_ext(env, RVH)) {
+ target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
+
+ if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) &&
+ !force_hs_excep) {
+ /*
+  * See if we need to adjust cause: yes if it's a VS-mode interrupt,
+  * no if the hypervisor has delegated one of HS mode's interrupts.
+  */
+ if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
+     cause == IRQ_VS_EXT) {
+     cause = cause - 1;
+ }
+ /* Trap to VS mode */
+ } else if (riscv_cpu_virt_enabled(env)) {
+ /* Trap into HS mode, from virt */
+ riscv_cpu_swap_hypervisor_regs(env);
+ env->hstatus = set_field(env->hstatus, HSTATUS_SP2V,
+ get_field(env->hstatus, HSTATUS_SPV));
+ env->hstatus = set_field(env->hstatus, HSTATUS_SP2P,
+ get_field(env->mstatus, SSTATUS_SPP));
+ env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
+ riscv_cpu_virt_enabled(env));
+
+ htval = env->guest_phys_fault_addr;
+
+ riscv_cpu_set_virt_enabled(env, 0);
+ riscv_cpu_set_force_hs_excep(env, 0);
+ } else {
+ /* Trap into HS mode */
+ env->hstatus = set_field(env->hstatus, HSTATUS_SP2V,
+ get_field(env->hstatus, HSTATUS_SPV));
+ env->hstatus = set_field(env->hstatus, HSTATUS_SP2P,
+ get_field(env->mstatus, SSTATUS_SPP));
+ env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
+ riscv_cpu_virt_enabled(env));
+
+ htval = env->guest_phys_fault_addr;
+ }
+ }
+
+ s = env->mstatus;
+ s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
s = set_field(s, MSTATUS_SPP, env->priv);
s = set_field(s, MSTATUS_SIE, 0);
env->mstatus = s;
env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
env->sepc = env->pc;
env->sbadaddr = tval;
+ env->htval = htval;
env->pc = (env->stvec >> 2 << 2) +
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_S);
} else {
/* handle the trap in M-mode */
- target_ulong s = env->mstatus;
- s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
- get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
+ if (riscv_has_ext(env, RVH)) {
+ if (riscv_cpu_virt_enabled(env)) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+#ifdef TARGET_RISCV32
+ env->mstatush = set_field(env->mstatush, MSTATUS_MPV,
+ riscv_cpu_virt_enabled(env));
+ env->mstatush = set_field(env->mstatush, MSTATUS_MTL,
+ riscv_cpu_force_hs_excep_enabled(env));
+#else
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
+ riscv_cpu_virt_enabled(env));
+ env->mstatus = set_field(env->mstatus, MSTATUS_MTL,
+ riscv_cpu_force_hs_excep_enabled(env));
+#endif
+
+ mtval2 = env->guest_phys_fault_addr;
+
+ /* Trapping to M mode, virt is disabled */
+ riscv_cpu_set_virt_enabled(env, 0);
+ riscv_cpu_set_force_hs_excep(env, 0);
+ }
+
+ s = env->mstatus;
+ s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
s = set_field(s, MSTATUS_MPP, env->priv);
s = set_field(s, MSTATUS_MIE, 0);
env->mstatus = s;
env->mcause = cause | ~(((target_ulong)-1) >> async);
env->mepc = env->pc;
env->mbadaddr = tval;
+ env->mtval2 = mtval2;
env->pc = (env->mtvec >> 2 << 2) +
((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_M);