/*
 * RISC-V emulation helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, [email protected]
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"

#define RISCV_DEBUG_INTERRUPT 0

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

#ifndef CONFIG_USER_ONLY
/*
 * Return RISC-V IRQ number if an interrupt should be taken, else -1.
 * Used in cpu-exec.c
 *
 * Adapted from Spike's processor_t::take_interrupt()
 */
static int riscv_cpu_hw_interrupts_pending(CPURISCVState *env)
{
    target_ulong pending_interrupts = atomic_read(&env->mip) & env->mie;

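    /*
     * m_enabled/s_enabled below are 0 or 1, so the unary minus turns them
     * into an all-zeros or all-ones mask. Non-delegated interrupts are
     * enabled when running below M-mode, or in M-mode with mstatus.MIE
     * set; interrupts delegated via mideleg likewise for S-mode and
     * mstatus.SIE.
     */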
    target_ulong mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong m_enabled = env->priv < PRV_M || (env->priv == PRV_M && mie);
    target_ulong enabled_interrupts = pending_interrupts &
                                      ~env->mideleg & -m_enabled;

    target_ulong sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong s_enabled = env->priv < PRV_S || (env->priv == PRV_S && sie);
    enabled_interrupts |= pending_interrupts & env->mideleg &
                          -s_enabled;

    if (enabled_interrupts) {
        return ctz64(enabled_interrupts); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}
#endif

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_hw_interrupts_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}

#if !defined(CONFIG_USER_ONLY)

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                int access_type, int mmu_idx)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */

    int mode = mmu_idx;

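    /*
     * M-mode data accesses honour mstatus.MPRV: when set, loads and
     * stores are translated as if running at the privilege level held
     * in mstatus.MPP. Instruction fetches always use the real mode.
     */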
    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    target_ulong base;
    int levels, ptidxbits, ptesize, vm, sum;
    int mxr = get_field(env->mstatus, MSTATUS_MXR);

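    /*
     * Privileged spec v1.10 keeps the root page-table PPN and the
     * translation mode in satp, and replaces mstatus.PUM with the
     * inverted-sense mstatus.SUM bit; v1.9.1 used sptbr and mstatus.VM
     * instead, hence the two code paths below.
     */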
    if (env->priv_ver >= PRIV_VERSION_1_10_0) {
        base = get_field(env->satp, SATP_PPN) << PGSHIFT;
        sum = get_field(env->mstatus, MSTATUS_SUM);
        vm = get_field(env->satp, SATP_MODE);
        switch (vm) {
        case VM_1_10_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_10_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV57:
            levels = 5; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    } else {
        base = env->sptbr << PGSHIFT;
        sum = !get_field(env->mstatus, MSTATUS_PUM);
        vm = get_field(env->mstatus, MSTATUS_VM);
        switch (vm) {
        case VM_1_09_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_09_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    }

    CPUState *cs = CPU(riscv_env_get_cpu(env));
    int va_bits = PGSHIFT + levels * ptidxbits;
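    /*
     * A virtual address is valid only if bits va_bits-1 and above are
     * all equal (i.e. the address is sign-extended from bit va_bits-1);
     * any other pattern in the upper bits must fault rather than
     * translate.
     */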
    target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);

        /* check that physical address of PTE is legal */
        target_ulong pte_addr = base + idx * ptesize;
#if defined(TARGET_RISCV32)
        target_ulong pte = ldl_phys(cs->as, pte_addr);
#elif defined(TARGET_RISCV64)
        target_ulong pte = ldq_phys(cs->as, pte_addr);
#endif
        target_ulong ppn = pte >> PTE_PPN_SHIFT;

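        /*
         * Classify this PTE: a pointer to the next level of the table,
         * an access to reject (wrong privilege given the U bit and SUM,
         * an invalid or reserved encoding such as W without R, or a
         * missing R/W/X permission, where mstatus.MXR lets loads read
         * execute-only pages), or a valid leaf to translate through.
         */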
        if (PTE_TABLE(pte)) { /* next level of page table */
            base = ppn << PGSHIFT;
        } else if ((pte & PTE_U) ? (mode == PRV_S) && !sum : !(mode == PRV_S)) {
            break;
        } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
            break;
        } else if (access_type == MMU_INST_FETCH ? !(pte & PTE_X) :
                   access_type == MMU_DATA_LOAD ? !(pte & PTE_R) &&
                   !(mxr && (pte & PTE_X)) : !((pte & PTE_R) && (pte & PTE_W))) {
            break;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /* if accessed or dirty bits need updating, and the PTE is
                 * in RAM, then we do so atomically with a compare and swap.
                 * if the PTE is in IO space, then it can't be updated.
                 * if the PTE changed, then we must re-walk the page table
                 * as the PTE is no longer valid */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                                             &addr1, &l, false);
                if (memory_access_is_direct(mr, true)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        atomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;

            if ((pte & PTE_R)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* only add write permission on stores or if the page
               is already dirty, so that we don't miss further
               page table walks to update the dirty bit */
            if ((pte & PTE_W) &&
                (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    int page_fault_exceptions =
        (env->priv_ver >= PRIV_VERSION_1_10_0) &&
        get_field(env->satp, SATP_MODE) != VM_1_10_MBARE;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
        return -1;
    }
    return phys_addr;
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    do_raise_exception_err(env, cs->exception_index, retaddr);
}

/* called by qemu's softmmu to fill the qemu tlb */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int ret;
    ret = riscv_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
    if (ret == TRANSLATE_FAIL) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        do_raise_exception_err(env, cs->exception_index, retaddr);
    }
}

#endif

int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                               int rw, int mmu_idx)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr pa = 0;
    int prot;
#endif
    int ret = TRANSLATE_FAIL;

    qemu_log_mask(CPU_LOG_MMU,
            "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx \
%d\n", __func__, env->pc, address, rw, mmu_idx);

#if !defined(CONFIG_USER_ONLY)
    ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
    qemu_log_mask(CPU_LOG_MMU,
            "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
            " prot %d\n", __func__, address, ret, pa, prot);
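    /* Even a successful translation must still pass the physical memory
     * protection (PMP) check for this access type before the page may be
     * entered into the TLB. */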
    if (!pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
        ret = TRANSLATE_FAIL;
    }
    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
    } else if (ret == TRANSLATE_FAIL) {
        raise_mmu_exception(env, address, rw);
    }
#else
    switch (rw) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    }
#endif
    return ret;
}

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 *
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (RISCV_DEBUG_INTERRUPT) {
        int log_cause = cs->exception_index & RISCV_EXCP_INT_MASK;
        if (cs->exception_index & RISCV_EXCP_INT_FLAG) {
            qemu_log_mask(LOG_TRACE, "core 0: intr %s, epc 0x" TARGET_FMT_lx,
                riscv_intr_names[log_cause], env->pc);
        } else {
            qemu_log_mask(LOG_TRACE, "core 0: trap %s, epc 0x" TARGET_FMT_lx,
                riscv_excp_names[log_cause], env->pc);
        }
    }
406 | ||
407 | target_ulong fixed_cause = 0; | |
408 | if (cs->exception_index & (RISCV_EXCP_INT_FLAG)) { | |
409 | /* hacky for now. the MSB (bit 63) indicates interrupt but cs->exception | |
410 | index is only 32 bits wide */ | |
411 | fixed_cause = cs->exception_index & RISCV_EXCP_INT_MASK; | |
412 | fixed_cause |= ((target_ulong)1) << (TARGET_LONG_BITS - 1); | |
413 | } else { | |
414 | /* fixup User ECALL -> correct priv ECALL */ | |
415 | if (cs->exception_index == RISCV_EXCP_U_ECALL) { | |
416 | switch (env->priv) { | |
417 | case PRV_U: | |
418 | fixed_cause = RISCV_EXCP_U_ECALL; | |
419 | break; | |
420 | case PRV_S: | |
421 | fixed_cause = RISCV_EXCP_S_ECALL; | |
422 | break; | |
423 | case PRV_H: | |
424 | fixed_cause = RISCV_EXCP_H_ECALL; | |
425 | break; | |
426 | case PRV_M: | |
427 | fixed_cause = RISCV_EXCP_M_ECALL; | |
428 | break; | |
429 | } | |
430 | } else { | |
431 | fixed_cause = cs->exception_index; | |
432 | } | |
433 | } | |
434 | ||
435 | target_ulong backup_epc = env->pc; | |
436 | ||
437 | target_ulong bit = fixed_cause; | |
438 | target_ulong deleg = env->medeleg; | |
439 | ||
440 | int hasbadaddr = | |
441 | (fixed_cause == RISCV_EXCP_INST_ADDR_MIS) || | |
442 | (fixed_cause == RISCV_EXCP_INST_ACCESS_FAULT) || | |
443 | (fixed_cause == RISCV_EXCP_LOAD_ADDR_MIS) || | |
444 | (fixed_cause == RISCV_EXCP_STORE_AMO_ADDR_MIS) || | |
445 | (fixed_cause == RISCV_EXCP_LOAD_ACCESS_FAULT) || | |
446 | (fixed_cause == RISCV_EXCP_STORE_AMO_ACCESS_FAULT) || | |
447 | (fixed_cause == RISCV_EXCP_INST_PAGE_FAULT) || | |
448 | (fixed_cause == RISCV_EXCP_LOAD_PAGE_FAULT) || | |
449 | (fixed_cause == RISCV_EXCP_STORE_PAGE_FAULT); | |
450 | ||
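    /* Interrupt causes carry the MSB set, so delegation for them is
     * controlled by mideleg rather than medeleg; strip the flag before
     * indexing the delegation bitmap. */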
    if (bit & ((target_ulong)1 << (TARGET_LONG_BITS - 1))) {
        deleg = env->mideleg;
        bit &= ~((target_ulong)1 << (TARGET_LONG_BITS - 1));
    }

    if (env->priv <= PRV_S && bit < 64 && ((deleg >> bit) & 1)) {
        /* handle the trap in S-mode */
        /* No need to check STVEC for misaligned - lower 2 bits cannot be set */
        env->pc = env->stvec;
        env->scause = fixed_cause;
        env->sepc = backup_epc;

        if (hasbadaddr) {
            if (RISCV_DEBUG_INTERRUPT) {
                qemu_log_mask(LOG_TRACE, "core " TARGET_FMT_ld
                    ": badaddr 0x" TARGET_FMT_lx, env->mhartid, env->badaddr);
            }
            env->sbadaddr = env->badaddr;
        } else {
            /* otherwise we must clear sbadaddr/stval
             * todo: support populating stval on illegal instructions */
            env->sbadaddr = 0;
        }

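        /* Trap entry: save the interrupt-enable bit into SPIE (pre-1.10
         * specs use the xIE bit of the interrupted privilege instead),
         * record the previous privilege in SPP, and disable S-mode
         * interrupts. */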
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        csr_write_helper(env, s, CSR_MSTATUS);
        riscv_set_mode(env, PRV_S);
    } else {
        /* No need to check MTVEC for misaligned - lower 2 bits cannot be set */
        env->pc = env->mtvec;
        env->mepc = backup_epc;
        env->mcause = fixed_cause;

        if (hasbadaddr) {
            if (RISCV_DEBUG_INTERRUPT) {
                qemu_log_mask(LOG_TRACE, "core " TARGET_FMT_ld
                    ": badaddr 0x" TARGET_FMT_lx, env->mhartid, env->badaddr);
            }
            env->mbadaddr = env->badaddr;
        } else {
            /* otherwise we must clear mbadaddr/mtval
             * todo: support populating mtval on illegal instructions */
            env->mbadaddr = 0;
        }

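        /* Mirror of the S-mode sequence above: save MIE into MPIE, the
         * previous privilege into MPP, and disable M-mode interrupts. */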
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        csr_write_helper(env, s, CSR_MSTATUS);
        riscv_set_mode(env, PRV_M);
    }
    /* TODO yield load reservation */
#endif
    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}