1/*
2 * ARM generic helpers.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8#include "qemu/osdep.h"
9#include "qemu/units.h"
10#include "target/arm/idau.h"
11#include "trace.h"
12#include "cpu.h"
13#include "internals.h"
14#include "exec/gdbstub.h"
15#include "exec/helper-proto.h"
16#include "qemu/host-utils.h"
17#include "sysemu/sysemu.h"
18#include "qemu/bitops.h"
19#include "qemu/crc32c.h"
20#include "qemu/qemu-print.h"
21#include "exec/exec-all.h"
22#include <zlib.h> /* For crc32 */
23#include "hw/semihosting/semihost.h"
24#include "sysemu/cpus.h"
25#include "sysemu/kvm.h"
26#include "qemu/range.h"
27#include "qapi/qapi-commands-machine-target.h"
28#include "qapi/error.h"
29#include "qemu/guest-random.h"
30#ifdef CONFIG_TCG
31#include "arm_ldst.h"
32#include "exec/cpu_ldst.h"
33#endif
34
35#ifdef CONFIG_USER_ONLY
36
37/* These should probably raise undefined insn exceptions. */
38void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
39{
40 ARMCPU *cpu = env_archcpu(env);
41
42 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
43}
44
45uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
46{
47 ARMCPU *cpu = env_archcpu(env);
48
49 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
50 return 0;
51}
52
53void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
54{
55 /* translate.c should never generate calls here in user-only mode */
56 g_assert_not_reached();
57}
58
59void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
60{
61 /* translate.c should never generate calls here in user-only mode */
62 g_assert_not_reached();
63}
64
65void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
66{
67 /* translate.c should never generate calls here in user-only mode */
68 g_assert_not_reached();
69}
70
71void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
72{
73 /* translate.c should never generate calls here in user-only mode */
74 g_assert_not_reached();
75}
76
77void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
78{
79 /* translate.c should never generate calls here in user-only mode */
80 g_assert_not_reached();
81}
82
83uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
84{
85 /*
86 * The TT instructions can be used by unprivileged code, but in
87 * user-only emulation we don't have the MPU.
88 * Luckily since we know we are NonSecure unprivileged (and that in
89 * turn means that the A flag wasn't specified), all the bits in the
90 * register must be zero:
91 * IREGION: 0 because IRVALID is 0
92 * IRVALID: 0 because NS
93 * S: 0 because NS
94 * NSRW: 0 because NS
95 * NSR: 0 because NS
96 * RW: 0 because unpriv and A flag not set
97 * R: 0 because unpriv and A flag not set
98 * SRVALID: 0 because NS
99 * MRVALID: 0 because unpriv and A flag not set
100 * SREGION: 0 because SRVALID is 0
101 * MREGION: 0 because MRVALID is 0
102 */
103 return 0;
104}
105
106#else
107
108/*
109 * What kind of stack write are we doing? This affects how exceptions
110 * generated during the stacking are treated.
111 */
112typedef enum StackingMode {
113 STACK_NORMAL,
114 STACK_IGNFAULTS,
115 STACK_LAZYFP,
116} StackingMode;
117
118static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
119 ARMMMUIdx mmu_idx, StackingMode mode)
120{
121 CPUState *cs = CPU(cpu);
122 CPUARMState *env = &cpu->env;
123 MemTxAttrs attrs = {};
124 MemTxResult txres;
125 target_ulong page_size;
126 hwaddr physaddr;
127 int prot;
128 ARMMMUFaultInfo fi = {};
129 bool secure = mmu_idx & ARM_MMU_IDX_M_S;
130 int exc;
131 bool exc_secure;
132
133 if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
134 &attrs, &prot, &page_size, &fi, NULL)) {
135 /* MPU/SAU lookup failed */
136 if (fi.type == ARMFault_QEMU_SFault) {
137 if (mode == STACK_LAZYFP) {
138 qemu_log_mask(CPU_LOG_INT,
139 "...SecureFault with SFSR.LSPERR "
140 "during lazy stacking\n");
141 env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
142 } else {
143 qemu_log_mask(CPU_LOG_INT,
144 "...SecureFault with SFSR.AUVIOL "
145 "during stacking\n");
146 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
147 }
148 env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
149 env->v7m.sfar = addr;
150 exc = ARMV7M_EXCP_SECURE;
151 exc_secure = false;
152 } else {
153 if (mode == STACK_LAZYFP) {
154 qemu_log_mask(CPU_LOG_INT,
155 "...MemManageFault with CFSR.MLSPERR\n");
156 env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
157 } else {
158 qemu_log_mask(CPU_LOG_INT,
159 "...MemManageFault with CFSR.MSTKERR\n");
160 env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
161 }
162 exc = ARMV7M_EXCP_MEM;
163 exc_secure = secure;
164 }
165 goto pend_fault;
166 }
167 address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
168 attrs, &txres);
169 if (txres != MEMTX_OK) {
170 /* BusFault trying to write the data */
171 if (mode == STACK_LAZYFP) {
172 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
173 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
174 } else {
175 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
176 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
177 }
178 exc = ARMV7M_EXCP_BUS;
179 exc_secure = false;
180 goto pend_fault;
181 }
182 return true;
183
184pend_fault:
185 /*
186 * By pending the exception at this point we are making
187 * the IMPDEF choice "overridden exceptions pended" (see the
188 * MergeExcInfo() pseudocode). The other choice would be to not
189 * pend them now and then make a choice about which to throw away
190 * later if we have two derived exceptions.
191 * The only case when we must not pend the exception but instead
192 * throw it away is if we are doing the push of the callee registers
193 * and we've already generated a derived exception (this is indicated
194 * by the caller passing STACK_IGNFAULTS). Even in this case we will
195 * still update the fault status registers.
196 */
197 switch (mode) {
198 case STACK_NORMAL:
199 armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
200 break;
201 case STACK_LAZYFP:
202 armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
203 break;
204 case STACK_IGNFAULTS:
205 break;
206 }
207 return false;
208}
209
210static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
211 ARMMMUIdx mmu_idx)
212{
213 CPUState *cs = CPU(cpu);
214 CPUARMState *env = &cpu->env;
215 MemTxAttrs attrs = {};
216 MemTxResult txres;
217 target_ulong page_size;
218 hwaddr physaddr;
219 int prot;
220 ARMMMUFaultInfo fi = {};
221 bool secure = mmu_idx & ARM_MMU_IDX_M_S;
222 int exc;
223 bool exc_secure;
224 uint32_t value;
225
226 if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
227 &attrs, &prot, &page_size, &fi, NULL)) {
228 /* MPU/SAU lookup failed */
229 if (fi.type == ARMFault_QEMU_SFault) {
230 qemu_log_mask(CPU_LOG_INT,
231 "...SecureFault with SFSR.AUVIOL during unstack\n");
232 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
233 env->v7m.sfar = addr;
234 exc = ARMV7M_EXCP_SECURE;
235 exc_secure = false;
236 } else {
237 qemu_log_mask(CPU_LOG_INT,
238 "...MemManageFault with CFSR.MUNSTKERR\n");
239 env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
240 exc = ARMV7M_EXCP_MEM;
241 exc_secure = secure;
242 }
243 goto pend_fault;
244 }
245
246 value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
247 attrs, &txres);
248 if (txres != MEMTX_OK) {
249 /* BusFault trying to read the data */
250 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
251 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
252 exc = ARMV7M_EXCP_BUS;
253 exc_secure = false;
254 goto pend_fault;
255 }
256
257 *dest = value;
258 return true;
259
260pend_fault:
261 /*
262 * By pending the exception at this point we are making
263 * the IMPDEF choice "overridden exceptions pended" (see the
264 * MergeExcInfo() pseudocode). The other choice would be to not
265 * pend them now and then make a choice about which to throw away
266 * later if we have two derived exceptions.
267 */
268 armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
269 return false;
270}
271
272void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
273{
274 /*
275 * Preserve FP state (because LSPACT was set and we are about
276 * to execute an FP instruction). This corresponds to the
277 * PreserveFPState() pseudocode.
278 * We may throw an exception if the stacking fails.
279 */
280 ARMCPU *cpu = env_archcpu(env);
281 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
282 bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
283 bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
284 bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
285 uint32_t fpcar = env->v7m.fpcar[is_secure];
286 bool stacked_ok = true;
287 bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
288 bool take_exception;
289
290 /* Take the iothread lock as we are going to touch the NVIC */
291 qemu_mutex_lock_iothread();
292
293 /* Check the background context had access to the FPU */
294 if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
295 armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
296 env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
297 stacked_ok = false;
298 } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
299 armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
300 env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
301 stacked_ok = false;
302 }
303
304 if (!splimviol && stacked_ok) {
305 /* We only stack if the stack limit wasn't violated */
306 int i;
307 ARMMMUIdx mmu_idx;
308
309 mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
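/* Each Dn register aliases a pair of S registers, so each loop iteration stacks two 32-bit words */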
310 for (i = 0; i < (ts ? 32 : 16); i += 2) {
311 uint64_t dn = *aa32_vfp_dreg(env, i / 2);
312 uint32_t faddr = fpcar + 4 * i;
313 uint32_t slo = extract64(dn, 0, 32);
314 uint32_t shi = extract64(dn, 32, 32);
315
316 if (i >= 16) {
317 faddr += 8; /* skip the slot for the FPSCR */
318 }
319 stacked_ok = stacked_ok &&
320 v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
321 v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
322 }
323
324 stacked_ok = stacked_ok &&
325 v7m_stack_write(cpu, fpcar + 0x40,
326 vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
327 }
328
329 /*
330 * We definitely pended an exception, but it's possible that it
331 * might not be able to be taken now. If its priority permits us
332 * to take it now, then we must not update the LSPACT or FP regs,
333 * but instead jump out to take the exception immediately.
334 * If it's just pending and won't be taken until the current
335 * handler exits, then we do update LSPACT and the FP regs.
336 */
337 take_exception = !stacked_ok &&
338 armv7m_nvic_can_take_pending_exception(env->nvic);
339
340 qemu_mutex_unlock_iothread();
341
342 if (take_exception) {
343 raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
344 }
345
346 env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
347
348 if (ts) {
349 /* Clear s0 to s31 and the FPSCR */
350 int i;
351
352 for (i = 0; i < 32; i += 2) {
353 *aa32_vfp_dreg(env, i / 2) = 0;
354 }
355 vfp_set_fpscr(env, 0);
356 }
357 /*
358 * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
359 * unchanged.
360 */
361}
362
363/*
364 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
365 * This may change the current stack pointer between Main and Process
366 * stack pointers if it is done for the CONTROL register for the current
367 * security state.
368 */
369static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
370 bool new_spsel,
371 bool secstate)
372{
373 bool old_is_psp = v7m_using_psp(env);
374
375 env->v7m.control[secstate] =
376 deposit32(env->v7m.control[secstate],
377 R_V7M_CONTROL_SPSEL_SHIFT,
378 R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
379
380 if (secstate == env->v7m.secure) {
381 bool new_is_psp = v7m_using_psp(env);
382 uint32_t tmp;
383
384 if (old_is_psp != new_is_psp) {
385 tmp = env->v7m.other_sp;
386 env->v7m.other_sp = env->regs[13];
387 env->regs[13] = tmp;
388 }
389 }
390}
391
392/*
393 * Write to v7M CONTROL.SPSEL bit. This may change the current
394 * stack pointer between Main and Process stack pointers.
395 */
396static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
397{
398 write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
399}
400
401void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
402{
403 /*
404 * Write a new value to v7m.exception, thus transitioning into or out
405 * of Handler mode; this may result in a change of active stack pointer.
406 */
407 bool new_is_psp, old_is_psp = v7m_using_psp(env);
408 uint32_t tmp;
409
410 env->v7m.exception = new_exc;
411
412 new_is_psp = v7m_using_psp(env);
413
414 if (old_is_psp != new_is_psp) {
415 tmp = env->v7m.other_sp;
416 env->v7m.other_sp = env->regs[13];
417 env->regs[13] = tmp;
418 }
419}
420
421/* Switch M profile security state between NS and S */
422static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
423{
424 uint32_t new_ss_msp, new_ss_psp;
425
426 if (env->v7m.secure == new_secstate) {
427 return;
428 }
429
430 /*
431 * All the banked state is accessed by looking at env->v7m.secure
432 * except for the stack pointer; rearrange the SP appropriately.
433 */
434 new_ss_msp = env->v7m.other_ss_msp;
435 new_ss_psp = env->v7m.other_ss_psp;
436
437 if (v7m_using_psp(env)) {
438 env->v7m.other_ss_psp = env->regs[13];
439 env->v7m.other_ss_msp = env->v7m.other_sp;
440 } else {
441 env->v7m.other_ss_msp = env->regs[13];
442 env->v7m.other_ss_psp = env->v7m.other_sp;
443 }
444
445 env->v7m.secure = new_secstate;
446
447 if (v7m_using_psp(env)) {
448 env->regs[13] = new_ss_psp;
449 env->v7m.other_sp = new_ss_msp;
450 } else {
451 env->regs[13] = new_ss_msp;
452 env->v7m.other_sp = new_ss_psp;
453 }
454}
455
456void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
457{
458 /*
459 * Handle v7M BXNS:
460 * - if the return value is a magic value, do exception return (like BX)
461 * - otherwise bit 0 of the return value is the target security state
462 */
463 uint32_t min_magic;
464
465 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
466 /* Covers FNC_RETURN and EXC_RETURN magic */
467 min_magic = FNC_RETURN_MIN_MAGIC;
468 } else {
469 /* EXC_RETURN magic only */
470 min_magic = EXC_RETURN_MIN_MAGIC;
471 }
472
473 if (dest >= min_magic) {
474 /*
475 * This is an exception return magic value; put it where
476 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
477 * Note that if we ever add gen_ss_advance() singlestep support to
478 * M profile this should count as an "instruction execution complete"
479 * event (compare gen_bx_excret_final_code()).
480 */
481 env->regs[15] = dest & ~1;
482 env->thumb = dest & 1;
483 HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
484 /* notreached */
485 }
486
487 /* translate.c should have made BXNS UNDEF unless we're secure */
488 assert(env->v7m.secure);
489
490 if (!(dest & 1)) {
491 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
492 }
493 switch_v7m_security_state(env, dest & 1);
494 env->thumb = 1;
495 env->regs[15] = dest & ~1;
496}
497
498void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
499{
500 /*
501 * Handle v7M BLXNS:
502 * - bit 0 of the destination address is the target security state
503 */
504
505 /* At this point regs[15] is the address just after the BLXNS */
506 uint32_t nextinst = env->regs[15] | 1;
507 uint32_t sp = env->regs[13] - 8;
508 uint32_t saved_psr;
509
510 /* translate.c will have made BLXNS UNDEF unless we're secure */
511 assert(env->v7m.secure);
512
513 if (dest & 1) {
514 /*
515 * Target is Secure, so this is just a normal BLX,
516 * except that the low bit doesn't indicate Thumb/not.
517 */
518 env->regs[14] = nextinst;
519 env->thumb = 1;
520 env->regs[15] = dest & ~1;
521 return;
522 }
523
524 /* Target is non-secure: first push a stack frame */
525 if (!QEMU_IS_ALIGNED(sp, 8)) {
526 qemu_log_mask(LOG_GUEST_ERROR,
527 "BLXNS with misaligned SP is UNPREDICTABLE\n");
528 }
529
530 if (sp < v7m_sp_limit(env)) {
531 raise_exception(env, EXCP_STKOF, 0, 1);
532 }
533
534 saved_psr = env->v7m.exception;
535 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
536 saved_psr |= XPSR_SFPA;
537 }
538
539 /* Note that these stores can throw exceptions on MPU faults */
540 cpu_stl_data_ra(env, sp, nextinst, GETPC());
541 cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());
542
543 env->regs[13] = sp;
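/* LR gets the FNC_RETURN magic value, so the eventual return from the non-secure callee is recognised as a secure function return (see do_v7m_function_return below) */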
544 env->regs[14] = 0xfeffffff;
545 if (arm_v7m_is_handler_mode(env)) {
546 /*
547 * Write a dummy value to IPSR, to avoid leaking the current secure
548 * exception number to non-secure code. This is guaranteed not
549 * to cause write_v7m_exception() to actually change stacks.
550 */
551 write_v7m_exception(env, 1);
552 }
553 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
554 switch_v7m_security_state(env, 0);
555 env->thumb = 1;
556 env->regs[15] = dest;
557}
558
559static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
560 bool spsel)
561{
562 /*
563 * Return a pointer to the location where we currently store the
564 * stack pointer for the requested security state and thread mode.
565 * This pointer will become invalid if the CPU state is updated
566 * such that the stack pointers are switched around (eg changing
567 * the SPSEL control bit).
568 * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
569 * Unlike that pseudocode, we require the caller to pass us in the
570 * SPSEL control bit value; this is because we also use this
571 * function in handling of pushing of the callee-saves registers
572 * part of the v8M stack frame (pseudocode PushCalleeStack()),
573 * and in the tailchain codepath the SPSEL bit comes from the exception
574 * return magic LR value from the previous exception. The pseudocode
575 * opencodes the stack-selection in PushCalleeStack(), but we prefer
576 * to make this utility function generic enough to do the job.
577 */
578 bool want_psp = threadmode && spsel;
579
580 if (secure == env->v7m.secure) {
581 if (want_psp == v7m_using_psp(env)) {
582 return &env->regs[13];
583 } else {
584 return &env->v7m.other_sp;
585 }
586 } else {
587 if (want_psp) {
588 return &env->v7m.other_ss_psp;
589 } else {
590 return &env->v7m.other_ss_msp;
591 }
592 }
593}
594
595static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
596 uint32_t *pvec)
597{
598 CPUState *cs = CPU(cpu);
599 CPUARMState *env = &cpu->env;
600 MemTxResult result;
601 uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
602 uint32_t vector_entry;
603 MemTxAttrs attrs = {};
604 ARMMMUIdx mmu_idx;
605 bool exc_secure;
606
607 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
608
609 /*
610 * We don't do a get_phys_addr() here because the rules for vector
611 * loads are special: they always use the default memory map, and
612 * the default memory map permits reads from all addresses.
613 * Since there's no easy way to pass through to pmsav8_mpu_lookup()
614 * that we want this special case which would always say "yes",
615 * we just do the SAU lookup here followed by a direct physical load.
616 */
617 attrs.secure = targets_secure;
618 attrs.user = false;
619
620 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
621 V8M_SAttributes sattrs = {};
622
623 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
624 if (sattrs.ns) {
625 attrs.secure = false;
626 } else if (!targets_secure) {
627 /* NS access to S memory */
628 goto load_fail;
629 }
630 }
631
632 vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
633 attrs, &result);
634 if (result != MEMTX_OK) {
635 goto load_fail;
636 }
637 *pvec = vector_entry;
638 return true;
639
640load_fail:
641 /*
642 * All vector table fetch fails are reported as HardFault, with
643 * HFSR.VECTTBL and .FORCED set. (FORCED is set because
644 * technically the underlying exception is a MemManage or BusFault
645 * that is escalated to HardFault.) This is a terminal exception,
646 * so we will either take the HardFault immediately or else enter
647 * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
648 */
649 exc_secure = targets_secure ||
650 !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
651 env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
652 armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
653 return false;
654}
655
656static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
657{
658 /*
659 * Return the integrity signature value for the callee-saves
660 * stack frame section. @lr is the exception return payload/LR value
661 * whose FType bit forms bit 0 of the signature if FP is present.
662 */
663 uint32_t sig = 0xfefa125a;
664
665 if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
666 sig |= 1;
667 }
668 return sig;
669}
670
671static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
672 bool ignore_faults)
673{
674 /*
675 * For v8M, push the callee-saves register part of the stack frame.
676 * Compare the v8M pseudocode PushCalleeStack().
677 * In the tailchaining case this may not be the current stack.
678 */
679 CPUARMState *env = &cpu->env;
680 uint32_t *frame_sp_p;
681 uint32_t frameptr;
682 ARMMMUIdx mmu_idx;
683 bool stacked_ok;
684 uint32_t limit;
685 bool want_psp;
686 uint32_t sig;
687 StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
688
689 if (dotailchain) {
690 bool mode = lr & R_V7M_EXCRET_MODE_MASK;
691 bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
692 !mode;
693
694 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
695 frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
696 lr & R_V7M_EXCRET_SPSEL_MASK);
697 want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
698 if (want_psp) {
699 limit = env->v7m.psplim[M_REG_S];
700 } else {
701 limit = env->v7m.msplim[M_REG_S];
702 }
703 } else {
704 mmu_idx = arm_mmu_idx(env);
705 frame_sp_p = &env->regs[13];
706 limit = v7m_sp_limit(env);
707 }
708
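/* The callee-saves frame is 0x28 bytes: integrity signature, a reserved word, then r4-r11 (matching the stores below) */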
709 frameptr = *frame_sp_p - 0x28;
710 if (frameptr < limit) {
711 /*
712 * Stack limit failure: set SP to the limit value, and generate
713 * STKOF UsageFault. Stack pushes below the limit must not be
714 * performed. It is IMPDEF whether pushes above the limit are
715 * performed; we choose not to.
716 */
717 qemu_log_mask(CPU_LOG_INT,
718 "...STKOF during callee-saves register stacking\n");
719 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
720 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
721 env->v7m.secure);
722 *frame_sp_p = limit;
723 return true;
724 }
725
726 /*
727 * Write as much of the stack frame as we can. A write failure may
728 * cause us to pend a derived exception.
729 */
730 sig = v7m_integrity_sig(env, lr);
731 stacked_ok =
732 v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
733 v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
734 v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
735 v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
736 v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
737 v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
738 v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
739 v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
740 v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
741
742 /* Update SP regardless of whether any of the stack accesses failed. */
743 *frame_sp_p = frameptr;
744
745 return !stacked_ok;
746}
747
748static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
749 bool ignore_stackfaults)
750{
751 /*
752 * Do the "take the exception" parts of exception entry,
753 * but not the pushing of state to the stack. This is
754 * similar to the pseudocode ExceptionTaken() function.
755 */
756 CPUARMState *env = &cpu->env;
757 uint32_t addr;
758 bool targets_secure;
759 int exc;
760 bool push_failed = false;
761
762 armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
763 qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
764 targets_secure ? "secure" : "nonsecure", exc);
765
766 if (dotailchain) {
767 /* Sanitize LR FType and PREFIX bits */
768 if (!arm_feature(env, ARM_FEATURE_VFP)) {
769 lr |= R_V7M_EXCRET_FTYPE_MASK;
770 }
771 lr = deposit32(lr, 24, 8, 0xff);
772 }
773
774 if (arm_feature(env, ARM_FEATURE_V8)) {
775 if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
776 (lr & R_V7M_EXCRET_S_MASK)) {
777 /*
778 * The background code (the owner of the registers in the
779 * exception frame) is Secure. This means it may either already
780 * have or now needs to push callee-saves registers.
781 */
782 if (targets_secure) {
783 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
784 /*
785 * We took an exception from Secure to NonSecure
786 * (which means the callee-saved registers got stacked)
787 * and are now tailchaining to a Secure exception.
788 * Clear DCRS so eventual return from this Secure
789 * exception unstacks the callee-saved registers.
790 */
791 lr &= ~R_V7M_EXCRET_DCRS_MASK;
792 }
793 } else {
794 /*
795 * We're going to a non-secure exception; push the
796 * callee-saves registers to the stack now, if they're
797 * not already saved.
798 */
799 if (lr & R_V7M_EXCRET_DCRS_MASK &&
800 !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
801 push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
802 ignore_stackfaults);
803 }
804 lr |= R_V7M_EXCRET_DCRS_MASK;
805 }
806 }
807
808 lr &= ~R_V7M_EXCRET_ES_MASK;
809 if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
810 lr |= R_V7M_EXCRET_ES_MASK;
811 }
812 lr &= ~R_V7M_EXCRET_SPSEL_MASK;
813 if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
814 lr |= R_V7M_EXCRET_SPSEL_MASK;
815 }
816
817 /*
818 * Clear registers if necessary to prevent non-secure exception
819 * code being able to see register values from secure code.
820 * Where register values become architecturally UNKNOWN we leave
821 * them with their previous values.
822 */
823 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
824 if (!targets_secure) {
825 /*
826 * Always clear the caller-saved registers (they have been
827 * pushed to the stack earlier in v7m_push_stack()).
828 * Clear callee-saved registers if the background code is
829 * Secure (in which case these regs were saved in
830 * v7m_push_callee_stack()).
831 */
832 int i;
833
834 for (i = 0; i < 13; i++) {
835 /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
836 if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
837 env->regs[i] = 0;
838 }
839 }
840 /* Clear EAPSR */
841 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
842 }
843 }
844 }
845
846 if (push_failed && !ignore_stackfaults) {
847 /*
848 * Derived exception on callee-saves register stacking:
849 * we might now want to take a different exception which
850 * targets a different security state, so try again from the top.
851 */
852 qemu_log_mask(CPU_LOG_INT,
853 "...derived exception on callee-saves register stacking");
854 v7m_exception_taken(cpu, lr, true, true);
855 return;
856 }
857
858 if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
859 /* Vector load failed: derived exception */
860 qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load\n");
861 v7m_exception_taken(cpu, lr, true, true);
862 return;
863 }
864
865 /*
866 * Now we've done everything that might cause a derived exception
867 * we can go ahead and activate whichever exception we're going to
868 * take (which might now be the derived exception).
869 */
870 armv7m_nvic_acknowledge_irq(env->nvic);
871
872 /* Switch to target security state -- must do this before writing SPSEL */
873 switch_v7m_security_state(env, targets_secure);
874 write_v7m_control_spsel(env, 0);
875 arm_clear_exclusive(env);
876 /* Clear SFPA and FPCA (has no effect if no FPU) */
877 env->v7m.control[M_REG_S] &=
878 ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
879 /* Clear IT bits */
880 env->condexec_bits = 0;
881 env->regs[14] = lr;
882 env->regs[15] = addr & 0xfffffffe;
883 env->thumb = addr & 1;
884}
885
886static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
887 bool apply_splim)
888{
889 /*
890 * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
891 * that we will need later in order to do lazy FP reg stacking.
892 */
893 bool is_secure = env->v7m.secure;
894 void *nvic = env->nvic;
895 /*
896 * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
897 * are banked and we want to update the bit in the bank for the
898 * current security state; and in one case we want to specifically
899 * update the NS banked version of a bit even if we are secure.
900 */
901 uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
902 uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
903 uint32_t *fpccr = &env->v7m.fpccr[is_secure];
904 bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
905
906 env->v7m.fpcar[is_secure] = frameptr & ~0x7;
907
908 if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
909 bool splimviol;
910 uint32_t splim = v7m_sp_limit(env);
911 bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
912 (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
913
914 splimviol = !ign && frameptr < splim;
915 *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
916 }
917
918 *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
919
920 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
921
922 *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
923
924 *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
925 !arm_v7m_is_handler_mode(env));
926
927 hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
928 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
929
930 bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
931 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
932
933 mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
934 *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
935
936 ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
937 *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
938
939 monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
940 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
941
942 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
943 s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
944 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
945
946 sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
947 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
948 }
949}
950
951void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
952{
953 /* fptr is the value of Rn, the frame pointer we store the FP regs to */
954 bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
955 bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
956 uintptr_t ra = GETPC();
957
958 assert(env->v7m.secure);
959
960 if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
961 return;
962 }
963
964 /* Check access to the coprocessor is permitted */
965 if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
966 raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
967 }
968
969 if (lspact) {
970 /* LSPACT should not be active when there is active FP state */
971 raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
972 }
973
974 if (fptr & 7) {
975 raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
976 }
977
978 /*
979 * Note that we do not use v7m_stack_write() here, because the
980 * accesses should not set the FSR bits for stacking errors if they
981 * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
982 * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
983 * and longjmp out.
984 */
985 if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
986 bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
987 int i;
988
989 for (i = 0; i < (ts ? 32 : 16); i += 2) {
990 uint64_t dn = *aa32_vfp_dreg(env, i / 2);
991 uint32_t faddr = fptr + 4 * i;
992 uint32_t slo = extract64(dn, 0, 32);
993 uint32_t shi = extract64(dn, 32, 32);
994
995 if (i >= 16) {
996 faddr += 8; /* skip the slot for the FPSCR */
997 }
998 cpu_stl_data_ra(env, faddr, slo, ra);
999 cpu_stl_data_ra(env, faddr + 4, shi, ra);
1000 }
1001 cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
1002
1003 /*
1004 * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
1005 * leave them unchanged, matching our choice in v7m_preserve_fp_state.
1006 */
1007 if (ts) {
1008 for (i = 0; i < 32; i += 2) {
1009 *aa32_vfp_dreg(env, i / 2) = 0;
1010 }
1011 vfp_set_fpscr(env, 0);
1012 }
1013 } else {
1014 v7m_update_fpccr(env, fptr, false);
1015 }
1016
1017 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
1018}
1019
1020void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
1021{
1022 uintptr_t ra = GETPC();
1023
1024 /* fptr is the value of Rn, the frame pointer we load the FP regs from */
1025 assert(env->v7m.secure);
1026
1027 if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1028 return;
1029 }
1030
1031 /* Check access to the coprocessor is permitted */
1032 if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
1033 raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
1034 }
1035
1036 if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1037 /* State in FP is still valid */
1038 env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
1039 } else {
1040 bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1041 int i;
1042 uint32_t fpscr;
1043
1044 if (fptr & 7) {
1045 raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
1046 }
1047
1048 for (i = 0; i < (ts ? 32 : 16); i += 2) {
1049 uint32_t slo, shi;
1050 uint64_t dn;
1051 uint32_t faddr = fptr + 4 * i;
1052
1053 if (i >= 16) {
1054 faddr += 8; /* skip the slot for the FPSCR */
1055 }
1056
1057 slo = cpu_ldl_data_ra(env, faddr, ra);
1058 shi = cpu_ldl_data_ra(env, faddr + 4, ra);
1059
1060 dn = (uint64_t) shi << 32 | slo;
1061 *aa32_vfp_dreg(env, i / 2) = dn;
1062 }
1063 fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
1064 vfp_set_fpscr(env, fpscr);
1065 }
1066
1067 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
1068}
1069
1070static bool v7m_push_stack(ARMCPU *cpu)
1071{
1072 /*
1073 * Do the "set up stack frame" part of exception entry,
1074 * similar to pseudocode PushStack().
1075 * Return true if we generate a derived exception (and so
1076 * should ignore further stack faults trying to process
1077 * that derived exception).
1078 */
1079 bool stacked_ok = true, limitviol = false;
1080 CPUARMState *env = &cpu->env;
1081 uint32_t xpsr = xpsr_read(env);
1082 uint32_t frameptr = env->regs[13];
1083 ARMMMUIdx mmu_idx = arm_mmu_idx(env);
1084 uint32_t framesize;
1085 bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
1086
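/* Frame size: 0x20 for the basic integer frame, 0x68 when s0-s15 and FPSCR are added, 0xa8 when s16-s31 must be stacked as well (Secure with FPCCR.TS set) */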
1087 if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
1088 (env->v7m.secure || nsacr_cp10)) {
1089 if (env->v7m.secure &&
1090 env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
1091 framesize = 0xa8;
1092 } else {
1093 framesize = 0x68;
1094 }
1095 } else {
1096 framesize = 0x20;
1097 }
1098
1099 /* Align stack pointer if the guest wants that */
1100 if ((frameptr & 4) &&
1101 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
1102 frameptr -= 4;
1103 xpsr |= XPSR_SPREALIGN;
1104 }
1105
1106 xpsr &= ~XPSR_SFPA;
1107 if (env->v7m.secure &&
1108 (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1109 xpsr |= XPSR_SFPA;
1110 }
1111
1112 frameptr -= framesize;
1113
1114 if (arm_feature(env, ARM_FEATURE_V8)) {
1115 uint32_t limit = v7m_sp_limit(env);
1116
1117 if (frameptr < limit) {
1118 /*
1119 * Stack limit failure: set SP to the limit value, and generate
1120 * STKOF UsageFault. Stack pushes below the limit must not be
1121 * performed. It is IMPDEF whether pushes above the limit are
1122 * performed; we choose not to.
1123 */
1124 qemu_log_mask(CPU_LOG_INT,
1125 "...STKOF during stacking\n");
1126 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
1127 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1128 env->v7m.secure);
1129 env->regs[13] = limit;
1130 /*
1131 * We won't try to perform any further memory accesses but
1132 * we must continue through the following code to check for
1133 * permission faults during FPU state preservation, and we
1134 * must update FPCCR if lazy stacking is enabled.
1135 */
1136 limitviol = true;
1137 stacked_ok = false;
1138 }
1139 }
1140
1141 /*
1142 * Write as much of the stack frame as we can. If we fail a stack
1143 * write this will result in a derived exception being pended
1144 * (which may be taken in preference to the one we started with
1145 * if it has higher priority).
1146 */
1147 stacked_ok = stacked_ok &&
1148 v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
1149 v7m_stack_write(cpu, frameptr + 4, env->regs[1],
1150 mmu_idx, STACK_NORMAL) &&
1151 v7m_stack_write(cpu, frameptr + 8, env->regs[2],
1152 mmu_idx, STACK_NORMAL) &&
1153 v7m_stack_write(cpu, frameptr + 12, env->regs[3],
1154 mmu_idx, STACK_NORMAL) &&
1155 v7m_stack_write(cpu, frameptr + 16, env->regs[12],
1156 mmu_idx, STACK_NORMAL) &&
1157 v7m_stack_write(cpu, frameptr + 20, env->regs[14],
1158 mmu_idx, STACK_NORMAL) &&
1159 v7m_stack_write(cpu, frameptr + 24, env->regs[15],
1160 mmu_idx, STACK_NORMAL) &&
1161 v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
1162
1163 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
1164 /* FPU is active, try to save its registers */
1165 bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
1166 bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
1167
1168 if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1169 qemu_log_mask(CPU_LOG_INT,
1170 "...SecureFault because LSPACT and FPCA both set\n");
1171 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1172 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1173 } else if (!env->v7m.secure && !nsacr_cp10) {
1174 qemu_log_mask(CPU_LOG_INT,
1175 "...Secure UsageFault with CFSR.NOCP because "
1176 "NSACR.CP10 prevents stacking FP regs\n");
1177 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
1178 env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1179 } else {
1180 if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
1181 /* Lazy stacking disabled, save registers now */
1182 int i;
1183 bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
1184 arm_current_el(env) != 0);
1185
1186 if (stacked_ok && !cpacr_pass) {
1187 /*
1188 * Take UsageFault if CPACR forbids access. The pseudocode
1189 * here does a full CheckCPEnabled() but we know the NSACR
1190 * check can never fail as we have already handled that.
1191 */
1192 qemu_log_mask(CPU_LOG_INT,
1193 "...UsageFault with CFSR.NOCP because "
1194 "CPACR.CP10 prevents stacking FP regs\n");
1195 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1196 env->v7m.secure);
1197 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
1198 stacked_ok = false;
1199 }
1200
1201 for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
1202 uint64_t dn = *aa32_vfp_dreg(env, i / 2);
1203 uint32_t faddr = frameptr + 0x20 + 4 * i;
1204 uint32_t slo = extract64(dn, 0, 32);
1205 uint32_t shi = extract64(dn, 32, 32);
1206
1207 if (i >= 16) {
1208 faddr += 8; /* skip the slot for the FPSCR */
1209 }
1210 stacked_ok = stacked_ok &&
1211 v7m_stack_write(cpu, faddr, slo,
1212 mmu_idx, STACK_NORMAL) &&
1213 v7m_stack_write(cpu, faddr + 4, shi,
1214 mmu_idx, STACK_NORMAL);
1215 }
1216 stacked_ok = stacked_ok &&
1217 v7m_stack_write(cpu, frameptr + 0x60,
1218 vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
1219 if (cpacr_pass) {
1220 for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
1221 *aa32_vfp_dreg(env, i / 2) = 0;
1222 }
1223 vfp_set_fpscr(env, 0);
1224 }
1225 } else {
1226 /* Lazy stacking enabled, save necessary info to stack later */
1227 v7m_update_fpccr(env, frameptr + 0x20, true);
1228 }
1229 }
1230 }
1231
1232 /*
1233 * If we broke a stack limit then SP was already updated earlier;
1234 * otherwise we update SP regardless of whether any of the stack
1235 * accesses failed or we took some other kind of fault.
1236 */
1237 if (!limitviol) {
1238 env->regs[13] = frameptr;
1239 }
1240
1241 return !stacked_ok;
1242}
1243
1244static void do_v7m_exception_exit(ARMCPU *cpu)
1245{
1246 CPUARMState *env = &cpu->env;
1247 uint32_t excret;
1248 uint32_t xpsr, xpsr_mask;
1249 bool ufault = false;
1250 bool sfault = false;
1251 bool return_to_sp_process;
1252 bool return_to_handler;
1253 bool rettobase = false;
1254 bool exc_secure = false;
1255 bool return_to_secure;
1256 bool ftype;
1257 bool restore_s16_s31;
1258
1259 /*
1260 * If we're not in Handler mode then jumps to magic exception-exit
1261 * addresses don't have magic behaviour. However for the v8M
1262 * security extensions the magic secure-function-return has to
1263 * work in thread mode too, so to avoid doing an extra check in
1264 * the generated code we allow exception-exit magic to also cause the
1265 * internal exception and bring us here in thread mode. Correct code
1266 * will never try to do this (the following insn fetch will always
1267 * fault), so the overhead of having taken an unnecessary exception
1268 * doesn't matter.
1269 */
1270 if (!arm_v7m_is_handler_mode(env)) {
1271 return;
1272 }
1273
1274 /*
1275 * In the spec pseudocode ExceptionReturn() is called directly
1276 * from BXWritePC() and gets the full target PC value including
1277 * bit zero. In QEMU's implementation we treat it as a normal
1278 * jump-to-register (which is then caught later on), and so split
1279 * the target value up between env->regs[15] and env->thumb in
1280 * gen_bx(). Reconstitute it.
1281 */
1282 excret = env->regs[15];
1283 if (env->thumb) {
1284 excret |= 1;
1285 }
1286
1287 qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
1288 " previous exception %d\n",
1289 excret, env->v7m.exception);
1290
1291 if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
1292 qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
1293 "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
1294 excret);
1295 }
1296
1297 ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
1298
1299 if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
1300 qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
1301 "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
1302 "if FPU not present\n",
1303 excret);
1304 ftype = true;
1305 }
1306
1307 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1308 /*
1309 * EXC_RETURN.ES validation check (R_SMFL). We must do this before
1310 * we pick which FAULTMASK to clear.
1311 */
1312 if (!env->v7m.secure &&
1313 ((excret & R_V7M_EXCRET_ES_MASK) ||
1314 !(excret & R_V7M_EXCRET_DCRS_MASK))) {
1315 sfault = 1;
1316 /* For all other purposes, treat ES as 0 (R_HXSR) */
1317 excret &= ~R_V7M_EXCRET_ES_MASK;
1318 }
1319 exc_secure = excret & R_V7M_EXCRET_ES_MASK;
1320 }
1321
1322 if (env->v7m.exception != ARMV7M_EXCP_NMI) {
1323 /*
1324 * Auto-clear FAULTMASK on return from other than NMI.
1325 * If the security extension is implemented then this only
1326 * happens if the raw execution priority is >= 0; the
1327 * value of the ES bit in the exception return value indicates
1328 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1329 */
1330 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1331 if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
1332 env->v7m.faultmask[exc_secure] = 0;
1333 }
1334 } else {
1335 env->v7m.faultmask[M_REG_NS] = 0;
1336 }
1337 }
1338
1339 switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
1340 exc_secure)) {
1341 case -1:
1342 /* attempt to exit an exception that isn't active */
1343 ufault = true;
1344 break;
1345 case 0:
1346 /* still an irq active now */
1347 break;
1348 case 1:
1349 /*
1350 * We returned to base exception level, no nesting.
1351 * (In the pseudocode this is written using "NestedActivation != 1"
1352 * where we have 'rettobase == false'.)
1353 */
1354 rettobase = true;
1355 break;
1356 default:
1357 g_assert_not_reached();
1358 }
1359
1360 return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1361 return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1362 return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1363 (excret & R_V7M_EXCRET_S_MASK);
1364
1365 if (arm_feature(env, ARM_FEATURE_V8)) {
1366 if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1367 /*
1368 * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1369 * we choose to take the UsageFault.
1370 */
1371 if ((excret & R_V7M_EXCRET_S_MASK) ||
1372 (excret & R_V7M_EXCRET_ES_MASK) ||
1373 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1374 ufault = true;
1375 }
1376 }
1377 if (excret & R_V7M_EXCRET_RES0_MASK) {
1378 ufault = true;
1379 }
1380 } else {
1381 /* For v7M we only recognize certain combinations of the low bits */
1382 switch (excret & 0xf) {
1383 case 1: /* Return to Handler */
1384 break;
1385 case 13: /* Return to Thread using Process stack */
1386 case 9: /* Return to Thread using Main stack */
1387 /*
1388 * We only need to check NONBASETHRDENA for v7M, because in
1389 * v8M this bit does not exist (it is RES1).
1390 */
1391 if (!rettobase &&
1392 !(env->v7m.ccr[env->v7m.secure] &
1393 R_V7M_CCR_NONBASETHRDENA_MASK)) {
1394 ufault = true;
1395 }
1396 break;
1397 default:
1398 ufault = true;
1399 }
1400 }
1401
1402 /*
1403 * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1404 * Handler mode (and will be until we write the new XPSR.Interrupt
1405 * field) this does not switch around the current stack pointer.
1406 * We must do this before we do any kind of tailchaining, including
1407 * for the derived exceptions on integrity check failures, or we will
1408 * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1409 */
1410 write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1411
1412 /*
1413 * Clear scratch FP values left in caller saved registers; this
1414 * must happen before any kind of tail chaining.
1415 */
1416 if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1417 (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1418 if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1419 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1420 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1421 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1422 "stackframe: error during lazy state deactivation\n");
1423 v7m_exception_taken(cpu, excret, true, false);
1424 return;
1425 } else {
1426 /* Clear s0..s15 and FPSCR */
1427 int i;
1428
1429 for (i = 0; i < 16; i += 2) {
1430 *aa32_vfp_dreg(env, i / 2) = 0;
1431 }
1432 vfp_set_fpscr(env, 0);
1433 }
1434 }
1435
1436 if (sfault) {
1437 env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1438 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1439 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1440 "stackframe: failed EXC_RETURN.ES validity check\n");
1441 v7m_exception_taken(cpu, excret, true, false);
1442 return;
1443 }
1444
1445 if (ufault) {
1446 /*
1447 * Bad exception return: instead of popping the exception
1448 * stack, directly take a usage fault on the current stack.
1449 */
1450 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1451 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1452 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1453 "stackframe: failed exception return integrity check\n");
1454 v7m_exception_taken(cpu, excret, true, false);
1455 return;
1456 }
1457
1458 /*
1459 * Tailchaining: if there is currently a pending exception that
1460 * is high enough priority to preempt execution at the level we're
1461 * about to return to, then just directly take that exception now,
1462 * avoiding an unstack-and-then-stack. Note that now we have
1463 * deactivated the previous exception by calling armv7m_nvic_complete_irq()
1464 * our current execution priority is already the execution priority we are
1465 * returning to -- none of the state we would unstack or set based on
1466 * the EXCRET value affects it.
1467 */
1468 if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1469 qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1470 v7m_exception_taken(cpu, excret, true, false);
1471 return;
1472 }
1473
1474 switch_v7m_security_state(env, return_to_secure);
1475
1476 {
1477 /*
1478 * The stack pointer we should be reading the exception frame from
1479 * depends on bits in the magic exception return type value (and
1480 * for v8M isn't necessarily the stack pointer we will eventually
1481 * end up resuming execution with). Get a pointer to the location
1482 * in the CPU state struct where the SP we need is currently being
1483 * stored; we will use and modify it in place.
1484 * We use this limited C variable scope so we don't accidentally
1485 * use 'frame_sp_p' after we do something that makes it invalid.
1486 */
1487 uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
1488 return_to_secure,
1489 !return_to_handler,
1490 return_to_sp_process);
1491 uint32_t frameptr = *frame_sp_p;
1492 bool pop_ok = true;
1493 ARMMMUIdx mmu_idx;
1494 bool return_to_priv = return_to_handler ||
1495 !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1496
1497 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1498 return_to_priv);
1499
1500 if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1501 arm_feature(env, ARM_FEATURE_V8)) {
1502 qemu_log_mask(LOG_GUEST_ERROR,
1503 "M profile exception return with non-8-aligned SP "
1504 "for destination state is UNPREDICTABLE\n");
1505 }
1506
1507 /* Do we need to pop callee-saved registers? */
1508 if (return_to_secure &&
1509 ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1510 (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1511 uint32_t actual_sig;
1512
1513 pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1514
1515 if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1516 /* Take a SecureFault on the current stack */
1517 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1518 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1519 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1520 "stackframe: failed exception return integrity "
1521 "signature check\n");
1522 v7m_exception_taken(cpu, excret, true, false);
1523 return;
1524 }
1525
1526 pop_ok = pop_ok &&
1527 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1528 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1529 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1530 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1531 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1532 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1533 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1534 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1535
1536 frameptr += 0x28;
1537 }
1538
1539 /* Pop registers */
1540 pop_ok = pop_ok &&
1541 v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1542 v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1543 v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1544 v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1545 v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1546 v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1547 v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1548 v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
1549
1550 if (!pop_ok) {
1551 /*
1552 * v7m_stack_read() pended a fault, so take it (as a tail
1553 * chained exception on the same stack frame)
1554 */
1555 qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1556 v7m_exception_taken(cpu, excret, true, false);
1557 return;
1558 }
1559
1560 /*
1561 * Returning from an exception with a PC with bit 0 set is defined
1562 * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1563 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1564 * the lsbit, and there are several RTOSes out there which incorrectly
1565 * assume the r15 in the stack frame should be a Thumb-style "lsbit
1566 * indicates ARM/Thumb" value, so we ignore the bit on v7M as well, but
1567 * complain about the badly behaved guest.
1568 */
1569 if (env->regs[15] & 1) {
1570 env->regs[15] &= ~1U;
1571 if (!arm_feature(env, ARM_FEATURE_V8)) {
1572 qemu_log_mask(LOG_GUEST_ERROR,
1573 "M profile return from interrupt with misaligned "
1574 "PC is UNPREDICTABLE on v7M\n");
1575 }
1576 }
1577
1578 if (arm_feature(env, ARM_FEATURE_V8)) {
1579 /*
1580 * For v8M we have to check whether the xPSR exception field
1581 * matches the EXCRET value for return to handler/thread
1582 * before we commit to changing the SP and xPSR.
1583 */
1584 bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1585 if (return_to_handler != will_be_handler) {
1586 /*
1587 * Take an INVPC UsageFault on the current stack.
1588 * By this point we will have switched to the security state
1589 * for the background state, so this UsageFault will target
1590 * that state.
1591 */
1592 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1593 env->v7m.secure);
1594 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1595 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1596 "stackframe: failed exception return integrity "
1597 "check\n");
1598 v7m_exception_taken(cpu, excret, true, false);
1599 return;
1600 }
1601 }
1602
1603 if (!ftype) {
1604 /* FP present and we need to handle it */
1605 if (!return_to_secure &&
1606 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1607 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1608 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1609 qemu_log_mask(CPU_LOG_INT,
1610 "...taking SecureFault on existing stackframe: "
1611 "Secure LSPACT set but exception return is "
1612 "not to secure state\n");
1613 v7m_exception_taken(cpu, excret, true, false);
1614 return;
1615 }
1616
1617 restore_s16_s31 = return_to_secure &&
1618 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1619
1620 if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1621 /* State in FPU is still valid, just clear LSPACT */
1622 env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1623 } else {
1624 int i;
1625 uint32_t fpscr;
1626 bool cpacr_pass, nsacr_pass;
1627
1628 cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1629 return_to_priv);
1630 nsacr_pass = return_to_secure ||
1631 extract32(env->v7m.nsacr, 10, 1);
1632
1633 if (!cpacr_pass) {
1634 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1635 return_to_secure);
1636 env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1637 qemu_log_mask(CPU_LOG_INT,
1638 "...taking UsageFault on existing "
1639 "stackframe: CPACR.CP10 prevents unstacking "
1640 "FP regs\n");
1641 v7m_exception_taken(cpu, excret, true, false);
1642 return;
1643 } else if (!nsacr_pass) {
1644 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1645 env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1646 qemu_log_mask(CPU_LOG_INT,
1647 "...taking Secure UsageFault on existing "
1648 "stackframe: NSACR.CP10 prevents unstacking "
1649 "FP regs\n");
1650 v7m_exception_taken(cpu, excret, true, false);
1651 return;
1652 }
1653
1654 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1655 uint32_t slo, shi;
1656 uint64_t dn;
1657 uint32_t faddr = frameptr + 0x20 + 4 * i;
1658
1659 if (i >= 16) {
1660 faddr += 8; /* Skip the slot for the FPSCR */
1661 }
1662
1663 pop_ok = pop_ok &&
1664 v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1665 v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1666
1667 if (!pop_ok) {
1668 break;
1669 }
1670
1671 dn = (uint64_t)shi << 32 | slo;
1672 *aa32_vfp_dreg(env, i / 2) = dn;
1673 }
1674 pop_ok = pop_ok &&
1675 v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1676 if (pop_ok) {
1677 vfp_set_fpscr(env, fpscr);
1678 }
1679 if (!pop_ok) {
1680 /*
1681 * These regs are 0 if security extension present;
1682 * otherwise merely UNKNOWN. We zero always.
1683 */
1684 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1685 *aa32_vfp_dreg(env, i / 2) = 0;
1686 }
1687 vfp_set_fpscr(env, 0);
1688 }
1689 }
1690 }
1691 env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1692 V7M_CONTROL, FPCA, !ftype);
1693
1694 /* Commit to consuming the stack frame */
1695 frameptr += 0x20;
1696 if (!ftype) {
1697 frameptr += 0x48;
1698 if (restore_s16_s31) {
1699 frameptr += 0x40;
1700 }
1701 }
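 /*
  * Rough size breakdown of what we just consumed: 0x20 is the 8-word
  * integer frame (r0-r3, r12, lr, pc, xPSR); 0x48 adds S0-S15, FPSCR
  * and a reserved word; the extra 0x40 is S16-S31 when restore_s16_s31
  * told us they were stacked too.
  */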
1702 /*
1703 * Undo stack alignment: the SPREALIGN bit indicates that the original
1704 * pre-exception SP was not 8-aligned and we added a padding word to
1705 * align it, so we undo this by ORing in the bit that takes the SP back
1706 * from its current 8-aligned value to its original misaligned value.
1707 * (Adding 4 would work too, but a logical OR is what the pseudocode specifies.)
1708 */
1709 if (xpsr & XPSR_SPREALIGN) {
1710 frameptr |= 4;
1711 }
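 /*
  * Example (addresses assumed for illustration): original SP 0x200003F4
  * was aligned down to 0x200003F0 on exception entry; after consuming
  * the frame we are back at the 8-aligned 0x200003F0, and ORing in bit 2
  * restores 0x200003F4.
  */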
1712 *frame_sp_p = frameptr;
1713 }
1714
1715 xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1716 if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1717 xpsr_mask &= ~XPSR_GE;
1718 }
1719 /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1720 xpsr_write(env, xpsr, xpsr_mask);
1721
1722 if (env->v7m.secure) {
1723 bool sfpa = xpsr & XPSR_SFPA;
1724
1725 env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1726 V7M_CONTROL, SFPA, sfpa);
1727 }
1728
1729 /*
1730 * The restored xPSR exception field will be zero if we're
1731 * resuming in Thread mode. If that doesn't match what the
1732 * exception return excret specified then this is a UsageFault.
1733 * v7M requires we make this check here; v8M did it earlier.
1734 */
1735 if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1736 /*
1737 * Take an INVPC UsageFault by pushing the stack again;
1738 * we know we're v7M so this is never a Secure UsageFault.
1739 */
1740 bool ignore_stackfaults;
1741
1742 assert(!arm_feature(env, ARM_FEATURE_V8));
1743 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1744 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1745 ignore_stackfaults = v7m_push_stack(cpu);
1746 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1747 "failed exception return integrity check\n");
1748 v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1749 return;
1750 }
1751
1752 /* Otherwise, we have a successful exception exit. */
1753 arm_clear_exclusive(env);
1754 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1755}
1756
1757static bool do_v7m_function_return(ARMCPU *cpu)
1758{
1759 /*
1760 * v8M security extensions magic function return.
1761 * We may either:
1762 * (1) throw an exception (longjump)
1763 * (2) return true if we successfully handled the function return
1764 * (3) return false if we failed a consistency check and have
1765 * pended a UsageFault that needs to be taken now
1766 *
1767 * At this point the magic return value is split between env->regs[15]
1768 * and env->thumb. We don't bother to reconstitute it because we don't
1769 * need it (all values are handled the same way).
1770 */
1771 CPUARMState *env = &cpu->env;
1772 uint32_t newpc, newpsr, newpsr_exc;
1773
1774 qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1775
1776 {
1777 bool threadmode, spsel;
1778 TCGMemOpIdx oi;
1779 ARMMMUIdx mmu_idx;
1780 uint32_t *frame_sp_p;
1781 uint32_t frameptr;
1782
1783 /* Pull the return address and IPSR from the Secure stack */
1784 threadmode = !arm_v7m_is_handler_mode(env);
1785 spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1786
1787 frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
1788 frameptr = *frame_sp_p;
1789
1790 /*
1791 * These loads may throw an exception (for MPU faults). We want to
1792 * do them as secure, so work out what MMU index that is.
1793 */
1794 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1795 oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
1796 newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
1797 newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
1798
1799 /* Consistency checks on new IPSR */
1800 newpsr_exc = newpsr & XPSR_EXCP;
1801 if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1802 (env->v7m.exception == 1 && newpsr_exc != 0))) {
1803 /* Pend the fault and tell our caller to take it */
1804 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1805 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1806 env->v7m.secure);
1807 qemu_log_mask(CPU_LOG_INT,
1808 "...taking INVPC UsageFault: "
1809 "IPSR consistency check failed\n");
1810 return false;
1811 }
1812
1813 *frame_sp_p = frameptr + 8;
1814 }
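 /*
  * The two words popped above (return address and partial xPSR) are the
  * frame that the earlier BLXNS pushed onto the Secure stack, which is
  * why the stack pointer is advanced by exactly 8 here.
  */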
1815
1816 /* This invalidates frame_sp_p */
1817 switch_v7m_security_state(env, true);
1818 env->v7m.exception = newpsr_exc;
1819 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1820 if (newpsr & XPSR_SFPA) {
1821 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
1822 }
1823 xpsr_write(env, 0, XPSR_IT);
1824 env->thumb = newpc & 1;
1825 env->regs[15] = newpc & ~1;
1826
1827 qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
1828 return true;
1829}
1830
1831static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1832 uint32_t addr, uint16_t *insn)
1833{
1834 /*
1835 * Load a 16-bit portion of a v7M instruction, returning true on success,
1836 * or false on failure (in which case we will have pended the appropriate
1837 * exception).
1838 * We need to do the instruction fetch's MPU and SAU checks
1839 * like this because there is no MMU index that would allow
1840 * doing the load with a single function call. Instead we must
1841 * first check that the security attributes permit the load
1842 * and that they don't mismatch on the two halves of the instruction,
1843 * and then we do the load as a secure load (ie using the security
1844 * attributes of the address, not the CPU, as architecturally required).
1845 */
1846 CPUState *cs = CPU(cpu);
1847 CPUARMState *env = &cpu->env;
1848 V8M_SAttributes sattrs = {};
1849 MemTxAttrs attrs = {};
1850 ARMMMUFaultInfo fi = {};
1851 MemTxResult txres;
1852 target_ulong page_size;
1853 hwaddr physaddr;
1854 int prot;
1855
1856 v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
1857 if (!sattrs.nsc || sattrs.ns) {
1858 /*
1859 * This must be the second half of the insn, and it straddles a
1860 * region boundary with the second half not being S&NSC.
1861 */
1862 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
1863 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1864 qemu_log_mask(CPU_LOG_INT,
1865 "...really SecureFault with SFSR.INVEP\n");
1866 return false;
1867 }
1868 if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
1869 &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
1870 /* the MPU lookup failed */
1871 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
1872 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
1873 qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
1874 return false;
1875 }
1876 *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
1877 attrs, &txres);
1878 if (txres != MEMTX_OK) {
1879 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
1880 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
1881 qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
1882 return false;
1883 }
1884 return true;
1885}
1886
1887static bool v7m_handle_execute_nsc(ARMCPU *cpu)
1888{
1889 /*
1890 * Check whether this attempt to execute code in a Secure & NS-Callable
1891 * memory region is for an SG instruction; if so, then emulate the
1892 * effect of the SG instruction and return true. Otherwise pend
1893 * the correct kind of exception and return false.
1894 */
1895 CPUARMState *env = &cpu->env;
1896 ARMMMUIdx mmu_idx;
1897 uint16_t insn;
1898
1899 /*
1900 * We should never get here unless get_phys_addr_pmsav8() caused
1901 * an exception for NS executing in S&NSC memory.
1902 */
1903 assert(!env->v7m.secure);
1904 assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
1905
1906 /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
1907 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1908
1909 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
1910 return false;
1911 }
1912
1913 if (!env->thumb) {
1914 goto gen_invep;
1915 }
1916
1917 if (insn != 0xe97f) {
1918 /*
1919 * Not an SG instruction first half (we choose the IMPDEF
1920 * early-SG-check option).
1921 */
1922 goto gen_invep;
1923 }
1924
1925 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
1926 return false;
1927 }
1928
1929 if (insn != 0xe97f) {
1930 /*
1931 * Not an SG instruction second half (yes, both halves of the SG
1932 * insn have the same hex value)
1933 */
1934 goto gen_invep;
1935 }
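 /*
  * So the full 32-bit encoding we have just matched is 0xE97FE97F, the
  * SG instruction, with both halfwords identical as noted above.
  */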
1936
1937 /*
1938 * OK, we have confirmed that we really have an SG instruction.
1939 * We know we're NS in S memory so don't need to repeat those checks.
1940 */
1941 qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
1942 ", executing it\n", env->regs[15]);
1943 env->regs[14] &= ~1;
1944 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1945 switch_v7m_security_state(env, true);
1946 xpsr_write(env, 0, XPSR_IT);
1947 env->regs[15] += 4;
1948 return true;
1949
1950gen_invep:
1951 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
1952 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1953 qemu_log_mask(CPU_LOG_INT,
1954 "...really SecureFault with SFSR.INVEP\n");
1955 return false;
1956}
1957
1958void arm_v7m_cpu_do_interrupt(CPUState *cs)
1959{
1960 ARMCPU *cpu = ARM_CPU(cs);
1961 CPUARMState *env = &cpu->env;
1962 uint32_t lr;
1963 bool ignore_stackfaults;
1964
1965 arm_log_exception(cs->exception_index);
1966
1967 /*
1968 * For exceptions we just mark as pending on the NVIC, and let that
1969 * handle it.
1970 */
1971 switch (cs->exception_index) {
1972 case EXCP_UDEF:
1973 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1974 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
1975 break;
1976 case EXCP_NOCP:
1977 {
1978 /*
1979 * NOCP might be directed to something other than the current
1980 * security state if this fault is because of NSACR; we indicate
1981 * the target security state using exception.target_el.
1982 */
1983 int target_secstate;
1984
1985 if (env->exception.target_el == 3) {
1986 target_secstate = M_REG_S;
1987 } else {
1988 target_secstate = env->v7m.secure;
1989 }
1990 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
1991 env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
1992 break;
1993 }
1994 case EXCP_INVSTATE:
1995 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1996 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
1997 break;
1998 case EXCP_STKOF:
1999 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2000 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2001 break;
2002 case EXCP_LSERR:
2003 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2004 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2005 break;
2006 case EXCP_UNALIGNED:
2007 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2008 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2009 break;
2010 case EXCP_SWI:
2011 /* The PC already points to the next instruction. */
2012 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2013 break;
2014 case EXCP_PREFETCH_ABORT:
2015 case EXCP_DATA_ABORT:
2016 /*
2017 * Note that for M profile we don't have a guest facing FSR, but
2018 * the env->exception.fsr will be populated by the code that
2019 * raises the fault, in the A profile short-descriptor format.
2020 */
2021 switch (env->exception.fsr & 0xf) {
2022 case M_FAKE_FSR_NSC_EXEC:
2023 /*
2024 * Exception generated when we try to execute code at an address
2025 * which is marked as Secure & Non-Secure Callable and the CPU
2026 * is in the Non-Secure state. The only instruction which can
2027 * be executed like this is SG (and that only if both halves of
2028 * the SG instruction have the same security attributes.)
2029 * Everything else must generate an INVEP SecureFault, so we
2030 * emulate the SG instruction here.
2031 */
2032 if (v7m_handle_execute_nsc(cpu)) {
2033 return;
2034 }
2035 break;
2036 case M_FAKE_FSR_SFAULT:
2037 /*
2038 * Various flavours of SecureFault for attempts to execute or
2039 * access data in the wrong security state.
2040 */
2041 switch (cs->exception_index) {
2042 case EXCP_PREFETCH_ABORT:
2043 if (env->v7m.secure) {
2044 env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2045 qemu_log_mask(CPU_LOG_INT,
2046 "...really SecureFault with SFSR.INVTRAN\n");
2047 } else {
2048 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2049 qemu_log_mask(CPU_LOG_INT,
2050 "...really SecureFault with SFSR.INVEP\n");
2051 }
2052 break;
2053 case EXCP_DATA_ABORT:
2054 /* This must be an NS access to S memory */
2055 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2056 qemu_log_mask(CPU_LOG_INT,
2057 "...really SecureFault with SFSR.AUVIOL\n");
2058 break;
2059 }
2060 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2061 break;
2062 case 0x8: /* External Abort */
2063 switch (cs->exception_index) {
2064 case EXCP_PREFETCH_ABORT:
2065 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2066 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2067 break;
2068 case EXCP_DATA_ABORT:
2069 env->v7m.cfsr[M_REG_NS] |=
2070 (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2071 env->v7m.bfar = env->exception.vaddress;
2072 qemu_log_mask(CPU_LOG_INT,
2073 "...with CFSR.PRECISERR and BFAR 0x%x\n",
2074 env->v7m.bfar);
2075 break;
2076 }
2077 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2078 break;
2079 default:
2080 /*
2081 * All other FSR values are either MPU faults or "can't happen
2082 * for M profile" cases.
2083 */
2084 switch (cs->exception_index) {
2085 case EXCP_PREFETCH_ABORT:
2086 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2087 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2088 break;
2089 case EXCP_DATA_ABORT:
2090 env->v7m.cfsr[env->v7m.secure] |=
2091 (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2092 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2093 qemu_log_mask(CPU_LOG_INT,
2094 "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2095 env->v7m.mmfar[env->v7m.secure]);
2096 break;
2097 }
2098 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2099 env->v7m.secure);
2100 break;
2101 }
2102 break;
2103 case EXCP_BKPT:
2104 if (semihosting_enabled()) {
2105 int nr;
2106 nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
2107 if (nr == 0xab) {
2108 env->regs[15] += 2;
2109 qemu_log_mask(CPU_LOG_INT,
2110 "...handling as semihosting call 0x%x\n",
2111 env->regs[0]);
2112 env->regs[0] = do_arm_semihosting(env);
2113 return;
2114 }
2115 }
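 /*
  * For example, a guest "BKPT 0xAB" with the operation number in r0
  * (and, for most operations, an argument block pointer in r1) is
  * handled as a semihosting call above and returns without pending
  * the Debug exception below.
  */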
2116 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2117 break;
2118 case EXCP_IRQ:
2119 break;
2120 case EXCP_EXCEPTION_EXIT:
2121 if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2122 /* Must be v8M security extension function return */
2123 assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2124 assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2125 if (do_v7m_function_return(cpu)) {
2126 return;
2127 }
2128 } else {
2129 do_v7m_exception_exit(cpu);
2130 return;
2131 }
2132 break;
2133 case EXCP_LAZYFP:
2134 /*
2135 * We already pended the specific exception in the NVIC in the
2136 * v7m_preserve_fp_state() helper function.
2137 */
2138 break;
2139 default:
2140 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2141 return; /* Never happens. Keep compiler happy. */
2142 }
2143
2144 if (arm_feature(env, ARM_FEATURE_V8)) {
2145 lr = R_V7M_EXCRET_RES1_MASK |
2146 R_V7M_EXCRET_DCRS_MASK;
2147 /*
2148 * The S bit indicates whether we should return to Secure
2149 * or NonSecure (ie our current state).
2150 * The ES bit indicates whether we're taking this exception
2151 * to Secure or NonSecure (ie our target state). We set it
2152 * later, in v7m_exception_taken().
2153 * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2154 * This corresponds to the ARM ARM pseudocode for v8M setting
2155 * some LR bits in PushStack() and some in ExceptionTaken();
2156 * the distinction matters for the tailchain cases where we
2157 * can take an exception without pushing the stack.
2158 */
2159 if (env->v7m.secure) {
2160 lr |= R_V7M_EXCRET_S_MASK;
2161 }
2162 if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2163 lr |= R_V7M_EXCRET_FTYPE_MASK;
2164 }
2165 } else {
2166 lr = R_V7M_EXCRET_RES1_MASK |
2167 R_V7M_EXCRET_S_MASK |
2168 R_V7M_EXCRET_DCRS_MASK |
2169 R_V7M_EXCRET_FTYPE_MASK |
2170 R_V7M_EXCRET_ES_MASK;
2171 if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2172 lr |= R_V7M_EXCRET_SPSEL_MASK;
2173 }
2174 }
2175 if (!arm_v7m_is_handler_mode(env)) {
2176 lr |= R_V7M_EXCRET_MODE_MASK;
2177 }
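 /*
  * On a plain v7M core this yields the familiar EXC_RETURN values:
  * roughly 0xFFFFFFF1 (return to Handler), 0xFFFFFFF9 (Thread, MSP)
  * and 0xFFFFFFFD (Thread, PSP), with the SPSEL bit here mirroring
  * CONTROL.SPSEL. For v8M the ES and SPSEL bits are filled in later,
  * in v7m_exception_taken(), as the comment above explains.
  */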
2178
2179 ignore_stackfaults = v7m_push_stack(cpu);
2180 v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2181}
2182
2183uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2184{
2185 uint32_t mask;
2186 unsigned el = arm_current_el(env);
2187
2188 /* First handle registers which unprivileged code can read */
2189
2190 switch (reg) {
2191 case 0 ... 7: /* xPSR sub-fields */
2192 mask = 0;
2193 if ((reg & 1) && el) {
2194 mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
2195 }
2196 if (!(reg & 4)) {
2197 mask |= XPSR_NZCV | XPSR_Q; /* APSR */
2198 if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
2199 mask |= XPSR_GE;
2200 }
2201 }
2202 /* EPSR reads as zero */
2203 return xpsr_read(env) & mask;
2204 break;
2205 case 20: /* CONTROL */
2206 {
2207 uint32_t value = env->v7m.control[env->v7m.secure];
2208 if (!env->v7m.secure) {
2209 /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
2210 value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
2211 }
2212 return value;
2213 }
2214 case 0x94: /* CONTROL_NS */
2215 /*
2216 * We have to handle this here because unprivileged Secure code
2217 * can read the NS CONTROL register.
2218 */
2219 if (!env->v7m.secure) {
2220 return 0;
2221 }
2222 return env->v7m.control[M_REG_NS] |
2223 (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2224 }
2225
2226 if (el == 0) {
2227 return 0; /* unprivileged reads others as zero */
2228 }
2229
2230 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2231 switch (reg) {
2232 case 0x88: /* MSP_NS */
2233 if (!env->v7m.secure) {
2234 return 0;
2235 }
2236 return env->v7m.other_ss_msp;
2237 case 0x89: /* PSP_NS */
2238 if (!env->v7m.secure) {
2239 return 0;
2240 }
2241 return env->v7m.other_ss_psp;
2242 case 0x8a: /* MSPLIM_NS */
2243 if (!env->v7m.secure) {
2244 return 0;
2245 }
2246 return env->v7m.msplim[M_REG_NS];
2247 case 0x8b: /* PSPLIM_NS */
2248 if (!env->v7m.secure) {
2249 return 0;
2250 }
2251 return env->v7m.psplim[M_REG_NS];
2252 case 0x90: /* PRIMASK_NS */
2253 if (!env->v7m.secure) {
2254 return 0;
2255 }
2256 return env->v7m.primask[M_REG_NS];
2257 case 0x91: /* BASEPRI_NS */
2258 if (!env->v7m.secure) {
2259 return 0;
2260 }
2261 return env->v7m.basepri[M_REG_NS];
2262 case 0x93: /* FAULTMASK_NS */
2263 if (!env->v7m.secure) {
2264 return 0;
2265 }
2266 return env->v7m.faultmask[M_REG_NS];
2267 case 0x98: /* SP_NS */
2268 {
2269 /*
2270 * This gives the non-secure SP selected based on whether we're
2271 * currently in handler mode or not, using the NS CONTROL.SPSEL.
2272 */
2273 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2274
2275 if (!env->v7m.secure) {
2276 return 0;
2277 }
2278 if (!arm_v7m_is_handler_mode(env) && spsel) {
2279 return env->v7m.other_ss_psp;
2280 } else {
2281 return env->v7m.other_ss_msp;
2282 }
2283 }
2284 default:
2285 break;
2286 }
2287 }
2288
2289 switch (reg) {
2290 case 8: /* MSP */
2291 return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2292 case 9: /* PSP */
2293 return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2294 case 10: /* MSPLIM */
2295 if (!arm_feature(env, ARM_FEATURE_V8)) {
2296 goto bad_reg;
2297 }
2298 return env->v7m.msplim[env->v7m.secure];
2299 case 11: /* PSPLIM */
2300 if (!arm_feature(env, ARM_FEATURE_V8)) {
2301 goto bad_reg;
2302 }
2303 return env->v7m.psplim[env->v7m.secure];
2304 case 16: /* PRIMASK */
2305 return env->v7m.primask[env->v7m.secure];
2306 case 17: /* BASEPRI */
2307 case 18: /* BASEPRI_MAX */
2308 return env->v7m.basepri[env->v7m.secure];
2309 case 19: /* FAULTMASK */
2310 return env->v7m.faultmask[env->v7m.secure];
2311 default:
2312 bad_reg:
2313 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2314 " register %d\n", reg);
2315 return 0;
2316 }
2317}
2318
2319void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2320{
2321 /*
2322 * We're passed bits [11..0] of the instruction; extract
2323 * SYSm and the mask bits.
2324 * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2325 * we choose to treat them as if the mask bits were valid.
2326 * NB that the pseudocode 'mask' variable is bits [11..10],
2327 * whereas ours is [11..8].
2328 */
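 /*
  * Example decode (assuming the usual UAL spellings): "MSR APSR_nzcvq, r0"
  * arrives with bit 3 of our mask set, so only NZCV and Q are written
  * below; "MSR APSR_g, r0" sets bit 2 instead and updates the GE flags,
  * and then only if the DSP extension is present.
  */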
2329 uint32_t mask = extract32(maskreg, 8, 4);
2330 uint32_t reg = extract32(maskreg, 0, 8);
2331 int cur_el = arm_current_el(env);
2332
2333 if (cur_el == 0 && reg > 7 && reg != 20) {
2334 /*
2335 * only xPSR sub-fields and CONTROL.SFPA may be written by
2336 * unprivileged code
2337 */
2338 return;
2339 }
2340
2341 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2342 switch (reg) {
2343 case 0x88: /* MSP_NS */
2344 if (!env->v7m.secure) {
2345 return;
2346 }
2347 env->v7m.other_ss_msp = val;
2348 return;
2349 case 0x89: /* PSP_NS */
2350 if (!env->v7m.secure) {
2351 return;
2352 }
2353 env->v7m.other_ss_psp = val;
2354 return;
2355 case 0x8a: /* MSPLIM_NS */
2356 if (!env->v7m.secure) {
2357 return;
2358 }
2359 env->v7m.msplim[M_REG_NS] = val & ~7;
2360 return;
2361 case 0x8b: /* PSPLIM_NS */
2362 if (!env->v7m.secure) {
2363 return;
2364 }
2365 env->v7m.psplim[M_REG_NS] = val & ~7;
2366 return;
2367 case 0x90: /* PRIMASK_NS */
2368 if (!env->v7m.secure) {
2369 return;
2370 }
2371 env->v7m.primask[M_REG_NS] = val & 1;
2372 return;
2373 case 0x91: /* BASEPRI_NS */
2374 if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2375 return;
2376 }
2377 env->v7m.basepri[M_REG_NS] = val & 0xff;
2378 return;
2379 case 0x93: /* FAULTMASK_NS */
2380 if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2381 return;
2382 }
2383 env->v7m.faultmask[M_REG_NS] = val & 1;
2384 return;
2385 case 0x94: /* CONTROL_NS */
2386 if (!env->v7m.secure) {
2387 return;
2388 }
2389 write_v7m_control_spsel_for_secstate(env,
2390 val & R_V7M_CONTROL_SPSEL_MASK,
2391 M_REG_NS);
2392 if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2393 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2394 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2395 }
2396 /*
2397 * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2398 * RES0 if the FPU is not present, and is stored in the S bank
2399 */
2400 if (arm_feature(env, ARM_FEATURE_VFP) &&
2401 extract32(env->v7m.nsacr, 10, 1)) {
2402 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2403 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2404 }
2405 return;
2406 case 0x98: /* SP_NS */
2407 {
2408 /*
2409 * This gives the non-secure SP selected based on whether we're
2410 * currently in handler mode or not, using the NS CONTROL.SPSEL.
2411 */
2412 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2413 bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2414 uint32_t limit;
2415
2416 if (!env->v7m.secure) {
2417 return;
2418 }
2419
2420 limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
2421
2422 if (val < limit) {
2423 CPUState *cs = env_cpu(env);
2424
2425 cpu_restore_state(cs, GETPC(), true);
2426 raise_exception(env, EXCP_STKOF, 0, 1);
2427 }
2428
2429 if (is_psp) {
2430 env->v7m.other_ss_psp = val;
2431 } else {
2432 env->v7m.other_ss_msp = val;
2433 }
2434 return;
2435 }
2436 default:
2437 break;
2438 }
2439 }
2440
2441 switch (reg) {
2442 case 0 ... 7: /* xPSR sub-fields */
2443 /* only APSR is actually writable */
2444 if (!(reg & 4)) {
2445 uint32_t apsrmask = 0;
2446
2447 if (mask & 8) {
2448 apsrmask |= XPSR_NZCV | XPSR_Q;
2449 }
2450 if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
2451 apsrmask |= XPSR_GE;
2452 }
2453 xpsr_write(env, val, apsrmask);
2454 }
2455 break;
2456 case 8: /* MSP */
2457 if (v7m_using_psp(env)) {
2458 env->v7m.other_sp = val;
2459 } else {
2460 env->regs[13] = val;
2461 }
2462 break;
2463 case 9: /* PSP */
2464 if (v7m_using_psp(env)) {
2465 env->regs[13] = val;
2466 } else {
2467 env->v7m.other_sp = val;
2468 }
2469 break;
2470 case 10: /* MSPLIM */
2471 if (!arm_feature(env, ARM_FEATURE_V8)) {
2472 goto bad_reg;
2473 }
2474 env->v7m.msplim[env->v7m.secure] = val & ~7;
2475 break;
2476 case 11: /* PSPLIM */
2477 if (!arm_feature(env, ARM_FEATURE_V8)) {
2478 goto bad_reg;
2479 }
2480 env->v7m.psplim[env->v7m.secure] = val & ~7;
2481 break;
2482 case 16: /* PRIMASK */
2483 env->v7m.primask[env->v7m.secure] = val & 1;
2484 break;
2485 case 17: /* BASEPRI */
2486 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2487 goto bad_reg;
2488 }
2489 env->v7m.basepri[env->v7m.secure] = val & 0xff;
2490 break;
2491 case 18: /* BASEPRI_MAX */
2492 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2493 goto bad_reg;
2494 }
2495 val &= 0xff;
2496 if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2497 || env->v7m.basepri[env->v7m.secure] == 0)) {
2498 env->v7m.basepri[env->v7m.secure] = val;
2499 }
2500 break;
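 /*
  * That is, BASEPRI_MAX can only tighten the mask: with BASEPRI
  * currently 0x40, writing 0x20 takes effect but writing 0x60 or 0 is
  * ignored, whereas writes to plain BASEPRI (case 17) are unconditional.
  */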
2501 case 19: /* FAULTMASK */
2502 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2503 goto bad_reg;
2504 }
2505 env->v7m.faultmask[env->v7m.secure] = val & 1;
2506 break;
2507 case 20: /* CONTROL */
2508 /*
2509 * Writing to the SPSEL bit only has an effect if we are in
2510 * thread mode; other bits can be updated by any privileged code.
2511 * write_v7m_control_spsel() deals with updating the SPSEL bit in
2512 * env->v7m.control, so we only need update the others.
2513 * For v7M, we must just ignore explicit writes to SPSEL in handler
2514 * mode; for v8M the write is permitted but will have no effect.
2515 * All these bits are ignored on writes from non-privileged code,
2516 * except for SFPA.
2517 */
2518 if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
2519 !arm_v7m_is_handler_mode(env))) {
2520 write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
2521 }
2522 if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
2523 env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
2524 env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
2525 }
2526 if (arm_feature(env, ARM_FEATURE_VFP)) {
2527 /*
2528 * SFPA is RAZ/WI from NS or if no FPU.
2529 * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
2530 * Both are stored in the S bank.
2531 */
2532 if (env->v7m.secure) {
2533 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2534 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
2535 }
2536 if (cur_el > 0 &&
2537 (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
2538 extract32(env->v7m.nsacr, 10, 1))) {
2539 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2540 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2541 }
2542 }
2543 break;
2544 default:
2545 bad_reg:
2546 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
2547 " register %d\n", reg);
2548 return;
2549 }
2550}
2551
2552uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
2553{
2554 /* Implement the TT instruction. op is bits [7:6] of the insn. */
2555 bool forceunpriv = op & 1;
2556 bool alt = op & 2;
2557 V8M_SAttributes sattrs = {};
2558 uint32_t tt_resp;
2559 bool r, rw, nsr, nsrw, mrvalid;
2560 int prot;
2561 ARMMMUFaultInfo fi = {};
2562 MemTxAttrs attrs = {};
2563 hwaddr phys_addr;
2564 ARMMMUIdx mmu_idx;
2565 uint32_t mregion;
2566 bool targetpriv;
2567 bool targetsec = env->v7m.secure;
2568 bool is_subpage;
2569
2570 /*
2571 * Work out which security state and privilege level we're
2572 * interested in...
2573 */
2574 if (alt) {
2575 targetsec = !targetsec;
2576 }
2577
2578 if (forceunpriv) {
2579 targetpriv = false;
2580 } else {
2581 targetpriv = arm_v7m_is_handler_mode(env) ||
2582 !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
2583 }
2584
2585 /* ...and then figure out which MMU index this is */
2586 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
2587
2588 /*
2589 * We know that the MPU and SAU don't care about the access type
2590 * for our purposes beyond that we don't want to claim to be
2591 * an insn fetch, so we arbitrarily call this a read.
2592 */
2593
2594 /*
2595 * MPU region info only available for privileged or if
2596 * inspecting the other MPU state.
2597 */
2598 if (arm_current_el(env) != 0 || alt) {
2599 /* We can ignore the return value as prot is always set */
2600 pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
2601 &phys_addr, &attrs, &prot, &is_subpage,
2602 &fi, &mregion);
2603 if (mregion == -1) {
2604 mrvalid = false;
2605 mregion = 0;
2606 } else {
2607 mrvalid = true;
2608 }
2609 r = prot & PAGE_READ;
2610 rw = prot & PAGE_WRITE;
2611 } else {
2612 r = false;
2613 rw = false;
2614 mrvalid = false;
2615 mregion = 0;
2616 }
2617
2618 if (env->v7m.secure) {
2619 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
2620 nsr = sattrs.ns && r;
2621 nsrw = sattrs.ns && rw;
2622 } else {
2623 sattrs.ns = true;
2624 nsr = false;
2625 nsrw = false;
2626 }
2627
2628 tt_resp = (sattrs.iregion << 24) |
2629 (sattrs.irvalid << 23) |
2630 ((!sattrs.ns) << 22) |
2631 (nsrw << 21) |
2632 (nsr << 20) |
2633 (rw << 19) |
2634 (r << 18) |
2635 (sattrs.srvalid << 17) |
2636 (mrvalid << 16) |
2637 (sattrs.sregion << 8) |
2638 mregion;
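 /*
  * Which gives the TT response layout used above: [31:24] IREGION,
  * [23] IRVALID, [22] S, [21] NSRW, [20] NSR, [19] RW, [18] R,
  * [17] SRVALID, [16] MRVALID, [15:8] SREGION, [7:0] MREGION.
  */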
2639
2640 return tt_resp;
2641}
2642
2643#endif /* !CONFIG_USER_ONLY */
2644
2645ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
2646 bool secstate, bool priv, bool negpri)
2647{
2648 ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
2649
2650 if (priv) {
2651 mmu_idx |= ARM_MMU_IDX_M_PRIV;
2652 }
2653
2654 if (negpri) {
2655 mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
2656 }
2657
2658 if (secstate) {
2659 mmu_idx |= ARM_MMU_IDX_M_S;
2660 }
2661
2662 return mmu_idx;
2663}
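/*
 * A quick sketch of the composition above: the M-profile MMU index is the
 * base ARM_MMU_IDX_M with up to three flag bits ORed in, so for example a
 * privileged Secure access with no negative-priority handling active maps
 * to ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_S.
 */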
2664
2665ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
2666 bool secstate, bool priv)
2667{
2668 bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
2669
2670 return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
2671}
2672
2673/* Return the MMU index for a v7M CPU in the specified security state */
2674ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
2675{
2676 bool priv = arm_current_el(env) != 0;
2677
2678 return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
2679}