/*
 * RISC-V Emulation Helpers for QEMU.
 *
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

#ifndef CONFIG_USER_ONLY

#if defined(TARGET_RISCV32)
static const char valid_vm_1_09[16] = {
    [VM_1_09_MBARE] = 1, [VM_1_09_SV32] = 1,
};
static const char valid_vm_1_10[16] = {
    [VM_1_10_MBARE] = 1, [VM_1_10_SV32] = 1
};
#elif defined(TARGET_RISCV64)
static const char valid_vm_1_09[16] = {
    [VM_1_09_MBARE] = 1, [VM_1_09_SV39] = 1, [VM_1_09_SV48] = 1,
};
static const char valid_vm_1_10[16] = {
    [VM_1_10_MBARE] = 1, [VM_1_10_SV39] = 1,
    [VM_1_10_SV48] = 1, [VM_1_10_SV57] = 1
};
#endif
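
/*
 * Return nonzero if the requested address-translation mode is supported by
 * this CPU. The table used depends on the privileged spec version: v1.9.1
 * encodes the mode in mstatus.VM, v1.10 in satp.MODE.
 */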
static int validate_vm(CPURISCVState *env, target_ulong vm)
{
    return (env->priv_ver >= PRIV_VERSION_1_10_0) ?
        valid_vm_1_10[vm & 0xf] : valid_vm_1_09[vm & 0xf];
}

#endif

/* Exceptions processing helpers */
void QEMU_NORETURN do_raise_exception_err(CPURISCVState *env,
                                          uint32_t exception, uintptr_t pc)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception);
    cs->exception_index = exception;
    cpu_loop_exit_restore(cs, pc);
}

void helper_raise_exception(CPURISCVState *env, uint32_t exception)
{
    do_raise_exception_err(env, exception, 0);
}
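
/*
 * Raise an illegal-instruction exception if the floating point unit is
 * disabled (mstatus.FS is Off), since the FP CSRs must not be accessed in
 * that state. The check only applies to system-mode emulation.
 */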
static void validate_mstatus_fs(CPURISCVState *env, uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
    if (!(env->mstatus & MSTATUS_FS)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }
#endif
}

/*
 * Handle writes to CSRs and any resulting special behavior
 *
 * Adapted from Spike's processor_t::set_csr
 */
void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
                      target_ulong csrno)
{
#ifndef CONFIG_USER_ONLY
    uint64_t delegable_ints = MIP_SSIP | MIP_STIP | MIP_SEIP | (1 << IRQ_X_COP);
    uint64_t all_ints = delegable_ints | MIP_MSIP | MIP_MTIP;
#endif

    switch (csrno) {
    case CSR_FFLAGS:
        validate_mstatus_fs(env, GETPC());
        cpu_riscv_set_fflags(env, val_to_write & (FSR_AEXC >> FSR_AEXC_SHIFT));
        break;
    case CSR_FRM:
        validate_mstatus_fs(env, GETPC());
        env->frm = val_to_write & (FSR_RD >> FSR_RD_SHIFT);
        break;
    case CSR_FCSR:
        validate_mstatus_fs(env, GETPC());
        env->frm = (val_to_write & FSR_RD) >> FSR_RD_SHIFT;
        cpu_riscv_set_fflags(env, (val_to_write & FSR_AEXC) >> FSR_AEXC_SHIFT);
        break;
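
    /*
     * The CSRs handled below only exist in system-mode emulation; in
     * user-mode-only builds they fall through to the illegal-instruction
     * default case.
     */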
#ifndef CONFIG_USER_ONLY
    case CSR_MSTATUS: {
        target_ulong mstatus = env->mstatus;
        target_ulong mask = 0;
        target_ulong mpp = get_field(val_to_write, MSTATUS_MPP);

        /* flush tlb on mstatus fields that affect VM */
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            if ((val_to_write ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP |
                    MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_VM)) {
                helper_tlb_flush(env);
            }
            mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
                MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
                MSTATUS_MPP | MSTATUS_MXR |
                (validate_vm(env, get_field(val_to_write, MSTATUS_VM)) ?
                    MSTATUS_VM : 0);
        }
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            if ((val_to_write ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP |
                    MSTATUS_MPRV | MSTATUS_SUM)) {
                helper_tlb_flush(env);
            }
            mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
                MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
                MSTATUS_MPP | MSTATUS_MXR;
        }

        /* silently discard mstatus.mpp writes for unsupported modes */
        if (mpp == PRV_H ||
            (!riscv_has_ext(env, RVS) && mpp == PRV_S) ||
            (!riscv_has_ext(env, RVU) && mpp == PRV_U)) {
            mask &= ~MSTATUS_MPP;
        }

        mstatus = (mstatus & ~mask) | (val_to_write & mask);

        /* Note: this is a workaround for an issue where mstatus.FS
           does not report dirty after floating point operations
           that modify floating point state. This workaround is
           technically compliant with the RISC-V Privileged
           specification, as it is legal to report only Off or Dirty,
           at the expense of extra floating point save/restore. */

        /* FP is always dirty or off */
        if (mstatus & MSTATUS_FS) {
            mstatus |= MSTATUS_FS;
        }

        int dirty = ((mstatus & MSTATUS_FS) == MSTATUS_FS) |
            ((mstatus & MSTATUS_XS) == MSTATUS_XS);
        mstatus = set_field(mstatus, MSTATUS_SD, dirty);
        env->mstatus = mstatus;
        break;
    }
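
    /*
     * riscv_set_local_interrupt() must be called with the iothread mutex
     * held; see its definition later in this file.
     */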
    case CSR_MIP: {
        /*
         * Since the writable bits in MIP are not set asynchronously by the
         * CLINT, no additional locking is needed for read-modify-write
         * CSR operations.
         */
        qemu_mutex_lock_iothread();
        RISCVCPU *cpu = riscv_env_get_cpu(env);
        riscv_set_local_interrupt(cpu, MIP_SSIP,
                                  (val_to_write & MIP_SSIP) != 0);
        riscv_set_local_interrupt(cpu, MIP_STIP,
                                  (val_to_write & MIP_STIP) != 0);
        /*
         * csrs/csrc on mip.SEIP is not decomposable into separate read and
         * write steps, so a different implementation is needed.
         */
        qemu_mutex_unlock_iothread();
        break;
    }
    case CSR_MIE: {
        env->mie = (env->mie & ~all_ints) |
            (val_to_write & all_ints);
        break;
    }
    case CSR_MIDELEG:
        env->mideleg = (env->mideleg & ~delegable_ints)
                                | (val_to_write & delegable_ints);
        break;
    case CSR_MEDELEG: {
        target_ulong mask = 0;
        mask |= 1ULL << (RISCV_EXCP_INST_ADDR_MIS);
        mask |= 1ULL << (RISCV_EXCP_INST_ACCESS_FAULT);
        mask |= 1ULL << (RISCV_EXCP_ILLEGAL_INST);
        mask |= 1ULL << (RISCV_EXCP_BREAKPOINT);
        mask |= 1ULL << (RISCV_EXCP_LOAD_ADDR_MIS);
        mask |= 1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT);
        mask |= 1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS);
        mask |= 1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
        mask |= 1ULL << (RISCV_EXCP_U_ECALL);
        mask |= 1ULL << (RISCV_EXCP_S_ECALL);
        mask |= 1ULL << (RISCV_EXCP_H_ECALL);
        mask |= 1ULL << (RISCV_EXCP_M_ECALL);
        mask |= 1ULL << (RISCV_EXCP_INST_PAGE_FAULT);
        mask |= 1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT);
        mask |= 1ULL << (RISCV_EXCP_STORE_PAGE_FAULT);
        env->medeleg = (env->medeleg & ~mask)
                                | (val_to_write & mask);
        break;
    }
    case CSR_MINSTRET:
        /* minstret is WARL so unsupported writes are ignored */
        break;
    case CSR_MCYCLE:
        /* mcycle is WARL so unsupported writes are ignored */
        break;
#if defined(TARGET_RISCV32)
    case CSR_MINSTRETH:
        /* minstreth is WARL so unsupported writes are ignored */
        break;
    case CSR_MCYCLEH:
        /* mcycleh is WARL so unsupported writes are ignored */
        break;
#endif
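
    /*
     * mucounteren and mscounteren are the privileged spec v1.9.1 names of
     * the counter-enable CSRs; v1.10 replaced them with scounteren and
     * mcounteren, so writes are only accepted for the older spec version.
     */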
    case CSR_MUCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            env->scounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_MSCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            env->mcounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
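
    /*
     * sstatus is a restricted S-mode view of mstatus: only the S-level
     * fields are writable here, and the result is folded back into mstatus
     * via a recursive csr_write_helper() call.
     */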
    case CSR_SSTATUS: {
        target_ulong ms = env->mstatus;
        target_ulong mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE
            | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS
            | SSTATUS_SUM | SSTATUS_SD;
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            mask |= SSTATUS_MXR;
        }
        ms = (ms & ~mask) | (val_to_write & mask);
        csr_write_helper(env, ms, CSR_MSTATUS);
        break;
    }
    case CSR_SIP: {
        qemu_mutex_lock_iothread();
        target_ulong next_mip = (env->mip & ~env->mideleg)
                                | (val_to_write & env->mideleg);
        qemu_mutex_unlock_iothread();
        csr_write_helper(env, next_mip, CSR_MIP);
        break;
    }
    case CSR_SIE: {
        target_ulong next_mie = (env->mie & ~env->mideleg)
                                | (val_to_write & env->mideleg);
        csr_write_helper(env, next_mie, CSR_MIE);
        break;
    }
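
    /*
     * sptbr (priv spec v1.9.1) and satp (v1.10) hold the root page-table PPN
     * and the translation mode; a change invalidates cached translations, so
     * the TLB is flushed before the new value takes effect.
     */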
    case CSR_SATP: /* CSR_SPTBR */ {
        if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
            break;
        }
        if (env->priv_ver <= PRIV_VERSION_1_09_1 && (val_to_write ^ env->sptbr))
        {
            helper_tlb_flush(env);
            env->sptbr = val_to_write & (((target_ulong)
                1 << (TARGET_PHYS_ADDR_SPACE_BITS - PGSHIFT)) - 1);
        }
        if (env->priv_ver >= PRIV_VERSION_1_10_0 &&
            validate_vm(env, get_field(val_to_write, SATP_MODE)) &&
            ((val_to_write ^ env->satp) & (SATP_MODE | SATP_ASID | SATP_PPN)))
        {
            helper_tlb_flush(env);
            env->satp = val_to_write;
        }
        break;
    }
    case CSR_SEPC:
        env->sepc = val_to_write;
        break;
    case CSR_STVEC:
        /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
        if ((val_to_write & 3) == 0) {
            env->stvec = val_to_write >> 2 << 2;
        } else {
            qemu_log_mask(LOG_UNIMP, "CSR_STVEC: vectored traps not supported");
        }
        break;
    case CSR_SCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            env->scounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_SSCRATCH:
        env->sscratch = val_to_write;
        break;
    case CSR_SCAUSE:
        env->scause = val_to_write;
        break;
    case CSR_SBADADDR:
        env->sbadaddr = val_to_write;
        break;
    case CSR_MEPC:
        env->mepc = val_to_write;
        break;
    case CSR_MTVEC:
        /* bits [1:0] indicate mode; 0 = direct, 1 = vectored, >= 2 reserved */
        if ((val_to_write & 3) == 0) {
            env->mtvec = val_to_write >> 2 << 2;
        } else {
            qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: vectored traps not supported");
        }
        break;
    case CSR_MCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            env->mcounteren = val_to_write;
            break;
        } else {
            goto do_illegal;
        }
    case CSR_MSCRATCH:
        env->mscratch = val_to_write;
        break;
    case CSR_MCAUSE:
        env->mcause = val_to_write;
        break;
    case CSR_MBADADDR:
        env->mbadaddr = val_to_write;
        break;
    case CSR_MISA:
        /* misa is WARL so unsupported writes are ignored */
        break;
    case CSR_PMPCFG0 ... CSR_PMPCFG3:
        pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val_to_write);
        break;
    case CSR_PMPADDR0 ... CSR_PMPADDR15:
        pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val_to_write);
        break;
#endif
#if !defined(CONFIG_USER_ONLY)
    do_illegal:
#endif
    default:
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }
}

/*
 * Handle reads to CSRs and any resulting special behavior
 *
 * Adapted from Spike's processor_t::get_csr
 */
target_ulong csr_read_helper(CPURISCVState *env, target_ulong csrno)
{
#ifndef CONFIG_USER_ONLY
    target_ulong ctr_en = env->priv == PRV_U ? env->scounteren :
                          env->priv == PRV_S ? env->mcounteren : -1U;
#else
    target_ulong ctr_en = -1;
#endif
    target_ulong ctr_ok = (ctr_en >> (csrno & 31)) & 1;
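
    /*
     * Reads of the user-level hpmcounters are allowed only if the
     * counter-enable bit for the current privilege level is set; the
     * counters are not implemented and read as zero. Disallowed accesses
     * fall through to the illegal-instruction exception at the end.
     */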
    if (csrno >= CSR_HPMCOUNTER3 && csrno <= CSR_HPMCOUNTER31) {
        if (ctr_ok) {
            return 0;
        }
    }
#if defined(TARGET_RISCV32)
    if (csrno >= CSR_HPMCOUNTER3H && csrno <= CSR_HPMCOUNTER31H) {
        if (ctr_ok) {
            return 0;
        }
    }
#endif
    if (csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31) {
        return 0;
    }
#if defined(TARGET_RISCV32)
    if (csrno >= CSR_MHPMCOUNTER3H && csrno <= CSR_MHPMCOUNTER31H) {
        return 0;
    }
#endif
    if (csrno >= CSR_MHPMEVENT3 && csrno <= CSR_MHPMEVENT31) {
        return 0;
    }

    switch (csrno) {
    case CSR_FFLAGS:
        validate_mstatus_fs(env, GETPC());
        return cpu_riscv_get_fflags(env);
    case CSR_FRM:
        validate_mstatus_fs(env, GETPC());
        return env->frm;
    case CSR_FCSR:
        validate_mstatus_fs(env, GETPC());
        return (cpu_riscv_get_fflags(env) << FSR_AEXC_SHIFT)
                | (env->frm << FSR_RD_SHIFT);
    /* rdtime/rdtimeh is trapped and emulated by bbl in system mode */
#ifdef CONFIG_USER_ONLY
    case CSR_TIME:
        return cpu_get_host_ticks();
#if defined(TARGET_RISCV32)
    case CSR_TIMEH:
        return cpu_get_host_ticks() >> 32;
#endif
#endif
    case CSR_INSTRET:
    case CSR_CYCLE:
        if (ctr_ok) {
#if !defined(CONFIG_USER_ONLY)
            if (use_icount) {
                return cpu_get_icount();
            } else {
                return cpu_get_host_ticks();
            }
#else
            return cpu_get_host_ticks();
#endif
        }
        break;
#if defined(TARGET_RISCV32)
    case CSR_INSTRETH:
    case CSR_CYCLEH:
        if (ctr_ok) {
#if !defined(CONFIG_USER_ONLY)
            if (use_icount) {
                return cpu_get_icount() >> 32;
            } else {
                return cpu_get_host_ticks() >> 32;
            }
#else
            return cpu_get_host_ticks() >> 32;
#endif
        }
        break;
#endif
#ifndef CONFIG_USER_ONLY
    case CSR_MINSTRET:
    case CSR_MCYCLE:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_host_ticks();
        }
#if defined(TARGET_RISCV32)
    case CSR_MINSTRETH:
    case CSR_MCYCLEH:
        if (use_icount) {
            return cpu_get_icount() >> 32;
        } else {
            return cpu_get_host_ticks() >> 32;
        }
#endif
    case CSR_MUCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            return env->scounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_MSCOUNTEREN:
        if (env->priv_ver <= PRIV_VERSION_1_09_1) {
            return env->mcounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_SSTATUS: {
        target_ulong mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE
            | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS
            | SSTATUS_SUM | SSTATUS_SD;
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            mask |= SSTATUS_MXR;
        }
        return env->mstatus & mask;
    }
    case CSR_SIP: {
        qemu_mutex_lock_iothread();
        target_ulong tmp = env->mip & env->mideleg;
        qemu_mutex_unlock_iothread();
        return tmp;
    }
    case CSR_SIE:
        return env->mie & env->mideleg;
    case CSR_SBADADDR:
        return env->sbadaddr;
    case CSR_SCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            return env->scounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_SATP: /* CSR_SPTBR */
        if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
            return 0;
        }
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            return env->satp;
        } else {
            return env->sptbr;
        }
    case CSR_SSCRATCH:
        return env->sscratch;
    case CSR_MIP: {
        qemu_mutex_lock_iothread();
        target_ulong tmp = env->mip;
        qemu_mutex_unlock_iothread();
        return tmp;
    }
    case CSR_MSCRATCH:
        return env->mscratch;
    case CSR_MBADADDR:
        return env->mbadaddr;
    case CSR_MARCHID:
        return 0; /* as spike does */
    case CSR_MIMPID:
        return 0; /* as spike does */
    case CSR_MVENDORID:
        return 0; /* as spike does */
    case CSR_MCOUNTEREN:
        if (env->priv_ver >= PRIV_VERSION_1_10_0) {
            return env->mcounteren;
        } else {
            break; /* illegal instruction */
        }
    case CSR_PMPCFG0 ... CSR_PMPCFG3:
        return pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
    case CSR_PMPADDR0 ... CSR_PMPADDR15:
        return pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
#endif
    }

    /* used by e.g. MTIME read */
    do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}

/*
 * Check that CSR access is allowed.
 *
 * Adapted from Spike's decode.h:validate_csr
 */
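/*
 * In the CSR address, bits [9:8] encode the lowest privilege level that may
 * access the CSR, and bits [11:10] == 0b11 mark the CSR as read-only.
 */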
static void validate_csr(CPURISCVState *env, uint64_t which,
                         uint64_t write, uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
    unsigned csr_priv = get_field((which), 0x300);
    unsigned csr_read_only = get_field((which), 0xC00) == 3;
    if (((write) && csr_read_only) || (env->priv < csr_priv)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, ra);
    }
#endif
}

target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
                          target_ulong csr)
{
    validate_csr(env, csr, 1, GETPC());
    uint64_t csr_backup = csr_read_helper(env, csr);
    csr_write_helper(env, src, csr);
    return csr_backup;
}

target_ulong helper_csrrs(CPURISCVState *env, target_ulong src,
                          target_ulong csr, target_ulong rs1_pass)
{
    validate_csr(env, csr, rs1_pass != 0, GETPC());
    uint64_t csr_backup = csr_read_helper(env, csr);
    if (rs1_pass) {
        csr_write_helper(env, src | csr_backup, csr);
    }
    return csr_backup;
}

target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
                          target_ulong csr, target_ulong rs1_pass)
{
    validate_csr(env, csr, rs1_pass != 0, GETPC());
    uint64_t csr_backup = csr_read_helper(env, csr);
    if (rs1_pass) {
        csr_write_helper(env, (~src) & csr_backup, csr);
    }
    return csr_backup;
}
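
/*
 * Note: in the csrrs/csrrc helpers above, rs1_pass is non-zero when the
 * instruction should actually write the CSR; when it is zero the CSR is
 * only read and the write (with its side effects) is skipped, as the CSR
 * set/clear forms require.
 */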

#ifndef CONFIG_USER_ONLY

/* iothread_mutex must be held */
void riscv_set_local_interrupt(RISCVCPU *cpu, target_ulong mask, int value)
{
    target_ulong old_mip = cpu->env.mip;
    cpu->env.mip = (old_mip & ~mask) | (value ? mask : 0);

    if (cpu->env.mip && !old_mip) {
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    } else if (!cpu->env.mip && old_mip) {
        cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    }
}

void riscv_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
}
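
/*
 * sret/mret return from a trap taken into S-mode or M-mode: the previous
 * privilege level is restored from mstatus.SPP/MPP, the interrupt-enable
 * bit is restored from SPIE/MPIE, and execution resumes at sepc/mepc (the
 * returned retpc).
 */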
target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
    if (!(env->priv >= PRV_S)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->sepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        do_raise_exception_err(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    target_ulong mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_SPP);
    mstatus = set_field(mstatus,
        env->priv_ver >= PRIV_VERSION_1_10_0 ?
        MSTATUS_SIE : MSTATUS_UIE << prev_priv,
        get_field(mstatus, MSTATUS_SPIE));
    mstatus = set_field(mstatus, MSTATUS_SPIE, 0);
    mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
    riscv_set_mode(env, prev_priv);
    csr_write_helper(env, mstatus, CSR_MSTATUS);

    return retpc;
}

target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
    if (!(env->priv >= PRV_M)) {
        do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
    }

    target_ulong retpc = env->mepc;
    if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
        do_raise_exception_err(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
    }

    target_ulong mstatus = env->mstatus;
    target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
    mstatus = set_field(mstatus,
        env->priv_ver >= PRIV_VERSION_1_10_0 ?
        MSTATUS_MIE : MSTATUS_UIE << prev_priv,
        get_field(mstatus, MSTATUS_MPIE));
    mstatus = set_field(mstatus, MSTATUS_MPIE, 0);
    mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
    riscv_set_mode(env, prev_priv);
    csr_write_helper(env, mstatus, CSR_MSTATUS);

    return retpc;
}

void helper_wfi(CPURISCVState *env)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));

    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    cpu_loop_exit(cs);
}

void helper_tlb_flush(CPURISCVState *env)
{
    RISCVCPU *cpu = riscv_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    tlb_flush(cs);
}

#endif /* !CONFIG_USER_ONLY */