1/*
2 * ARM generic helpers.
3 *
4 * This code is licensed under the GNU GPL v2 or later.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9#include "qemu/osdep.h"
10#include "qemu/units.h"
11#include "target/arm/idau.h"
12#include "trace.h"
13#include "cpu.h"
14#include "internals.h"
15#include "exec/gdbstub.h"
16#include "exec/helper-proto.h"
17#include "qemu/host-utils.h"
 18#include "qemu/main-loop.h"
19#include "sysemu/sysemu.h"
20#include "qemu/bitops.h"
21#include "qemu/crc32c.h"
22#include "qemu/qemu-print.h"
23#include "exec/exec-all.h"
24#include <zlib.h> /* For crc32 */
25#include "hw/semihosting/semihost.h"
26#include "sysemu/cpus.h"
27#include "sysemu/kvm.h"
28#include "qemu/range.h"
29#include "qapi/qapi-commands-machine-target.h"
30#include "qapi/error.h"
31#include "qemu/guest-random.h"
32#ifdef CONFIG_TCG
33#include "arm_ldst.h"
34#include "exec/cpu_ldst.h"
35#endif
36
37#ifdef CONFIG_USER_ONLY
38
39/* These should probably raise undefined insn exceptions. */
40void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
41{
42 ARMCPU *cpu = env_archcpu(env);
43
44 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
45}
46
47uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
48{
49 ARMCPU *cpu = env_archcpu(env);
50
51 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
52 return 0;
53}
54
55void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
56{
57 /* translate.c should never generate calls here in user-only mode */
58 g_assert_not_reached();
59}
60
61void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
62{
63 /* translate.c should never generate calls here in user-only mode */
64 g_assert_not_reached();
65}
66
67void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
68{
69 /* translate.c should never generate calls here in user-only mode */
70 g_assert_not_reached();
71}
72
73void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
74{
75 /* translate.c should never generate calls here in user-only mode */
76 g_assert_not_reached();
77}
78
79void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
80{
81 /* translate.c should never generate calls here in user-only mode */
82 g_assert_not_reached();
83}
84
85uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
86{
87 /*
88 * The TT instructions can be used by unprivileged code, but in
89 * user-only emulation we don't have the MPU.
90 * Luckily since we know we are NonSecure unprivileged (and that in
91 * turn means that the A flag wasn't specified), all the bits in the
92 * register must be zero:
93 * IREGION: 0 because IRVALID is 0
94 * IRVALID: 0 because NS
95 * S: 0 because NS
96 * NSRW: 0 because NS
97 * NSR: 0 because NS
98 * RW: 0 because unpriv and A flag not set
99 * R: 0 because unpriv and A flag not set
100 * SRVALID: 0 because NS
101 * MRVALID: 0 because unpriv and A flag not set
 102 * SREGION: 0 because SRVALID is 0
103 * MREGION: 0 because MRVALID is 0
104 */
105 return 0;
106}
107
108#else
109
110/*
111 * What kind of stack write are we doing? This affects how exceptions
112 * generated during the stacking are treated.
113 */
114typedef enum StackingMode {
115 STACK_NORMAL,
116 STACK_IGNFAULTS,
117 STACK_LAZYFP,
118} StackingMode;
119
120static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
121 ARMMMUIdx mmu_idx, StackingMode mode)
122{
123 CPUState *cs = CPU(cpu);
124 CPUARMState *env = &cpu->env;
125 MemTxAttrs attrs = {};
126 MemTxResult txres;
127 target_ulong page_size;
128 hwaddr physaddr;
129 int prot;
130 ARMMMUFaultInfo fi = {};
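    /* For M-profile MMU indexes, the ARM_MMU_IDX_M_S bit indicates the Secure state */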
131 bool secure = mmu_idx & ARM_MMU_IDX_M_S;
132 int exc;
133 bool exc_secure;
134
135 if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
136 &attrs, &prot, &page_size, &fi, NULL)) {
137 /* MPU/SAU lookup failed */
138 if (fi.type == ARMFault_QEMU_SFault) {
139 if (mode == STACK_LAZYFP) {
140 qemu_log_mask(CPU_LOG_INT,
141 "...SecureFault with SFSR.LSPERR "
142 "during lazy stacking\n");
143 env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
144 } else {
145 qemu_log_mask(CPU_LOG_INT,
146 "...SecureFault with SFSR.AUVIOL "
147 "during stacking\n");
148 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
149 }
150 env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
151 env->v7m.sfar = addr;
152 exc = ARMV7M_EXCP_SECURE;
153 exc_secure = false;
154 } else {
155 if (mode == STACK_LAZYFP) {
156 qemu_log_mask(CPU_LOG_INT,
157 "...MemManageFault with CFSR.MLSPERR\n");
158 env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
159 } else {
160 qemu_log_mask(CPU_LOG_INT,
161 "...MemManageFault with CFSR.MSTKERR\n");
162 env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
163 }
164 exc = ARMV7M_EXCP_MEM;
165 exc_secure = secure;
166 }
167 goto pend_fault;
168 }
169 address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
170 attrs, &txres);
171 if (txres != MEMTX_OK) {
172 /* BusFault trying to write the data */
173 if (mode == STACK_LAZYFP) {
174 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
175 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
176 } else {
177 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
178 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
179 }
180 exc = ARMV7M_EXCP_BUS;
181 exc_secure = false;
182 goto pend_fault;
183 }
184 return true;
185
186pend_fault:
187 /*
188 * By pending the exception at this point we are making
189 * the IMPDEF choice "overridden exceptions pended" (see the
190 * MergeExcInfo() pseudocode). The other choice would be to not
191 * pend them now and then make a choice about which to throw away
192 * later if we have two derived exceptions.
193 * The only case when we must not pend the exception but instead
194 * throw it away is if we are doing the push of the callee registers
195 * and we've already generated a derived exception (this is indicated
196 * by the caller passing STACK_IGNFAULTS). Even in this case we will
197 * still update the fault status registers.
198 */
199 switch (mode) {
200 case STACK_NORMAL:
201 armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
202 break;
203 case STACK_LAZYFP:
204 armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
205 break;
206 case STACK_IGNFAULTS:
207 break;
208 }
209 return false;
210}
211
212static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
213 ARMMMUIdx mmu_idx)
214{
215 CPUState *cs = CPU(cpu);
216 CPUARMState *env = &cpu->env;
217 MemTxAttrs attrs = {};
218 MemTxResult txres;
219 target_ulong page_size;
220 hwaddr physaddr;
221 int prot;
222 ARMMMUFaultInfo fi = {};
223 bool secure = mmu_idx & ARM_MMU_IDX_M_S;
224 int exc;
225 bool exc_secure;
226 uint32_t value;
227
228 if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
229 &attrs, &prot, &page_size, &fi, NULL)) {
230 /* MPU/SAU lookup failed */
231 if (fi.type == ARMFault_QEMU_SFault) {
232 qemu_log_mask(CPU_LOG_INT,
233 "...SecureFault with SFSR.AUVIOL during unstack\n");
234 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
235 env->v7m.sfar = addr;
236 exc = ARMV7M_EXCP_SECURE;
237 exc_secure = false;
238 } else {
239 qemu_log_mask(CPU_LOG_INT,
240 "...MemManageFault with CFSR.MUNSTKERR\n");
241 env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
242 exc = ARMV7M_EXCP_MEM;
243 exc_secure = secure;
244 }
245 goto pend_fault;
246 }
247
248 value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
249 attrs, &txres);
250 if (txres != MEMTX_OK) {
251 /* BusFault trying to read the data */
252 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
253 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
254 exc = ARMV7M_EXCP_BUS;
255 exc_secure = false;
256 goto pend_fault;
257 }
258
259 *dest = value;
260 return true;
261
262pend_fault:
263 /*
264 * By pending the exception at this point we are making
265 * the IMPDEF choice "overridden exceptions pended" (see the
266 * MergeExcInfo() pseudocode). The other choice would be to not
267 * pend them now and then make a choice about which to throw away
268 * later if we have two derived exceptions.
269 */
270 armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
271 return false;
272}
273
274void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
275{
276 /*
277 * Preserve FP state (because LSPACT was set and we are about
278 * to execute an FP instruction). This corresponds to the
279 * PreserveFPState() pseudocode.
280 * We may throw an exception if the stacking fails.
281 */
282 ARMCPU *cpu = env_archcpu(env);
283 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
284 bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
285 bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
286 bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
287 uint32_t fpcar = env->v7m.fpcar[is_secure];
288 bool stacked_ok = true;
289 bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
290 bool take_exception;
291
292 /* Take the iothread lock as we are going to touch the NVIC */
293 qemu_mutex_lock_iothread();
294
295 /* Check the background context had access to the FPU */
296 if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
297 armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
298 env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
299 stacked_ok = false;
300 } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
301 armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
302 env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
303 stacked_ok = false;
304 }
305
306 if (!splimviol && stacked_ok) {
307 /* We only stack if the stack limit wasn't violated */
308 int i;
309 ARMMMUIdx mmu_idx;
310
311 mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
312 for (i = 0; i < (ts ? 32 : 16); i += 2) {
313 uint64_t dn = *aa32_vfp_dreg(env, i / 2);
314 uint32_t faddr = fpcar + 4 * i;
315 uint32_t slo = extract64(dn, 0, 32);
316 uint32_t shi = extract64(dn, 32, 32);
317
318 if (i >= 16) {
319 faddr += 8; /* skip the slot for the FPSCR */
320 }
321 stacked_ok = stacked_ok &&
322 v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
323 v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
324 }
325
326 stacked_ok = stacked_ok &&
327 v7m_stack_write(cpu, fpcar + 0x40,
328 vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
329 }
330
331 /*
332 * We definitely pended an exception, but it's possible that it
333 * might not be able to be taken now. If its priority permits us
334 * to take it now, then we must not update the LSPACT or FP regs,
335 * but instead jump out to take the exception immediately.
336 * If it's just pending and won't be taken until the current
337 * handler exits, then we do update LSPACT and the FP regs.
338 */
339 take_exception = !stacked_ok &&
340 armv7m_nvic_can_take_pending_exception(env->nvic);
341
342 qemu_mutex_unlock_iothread();
343
344 if (take_exception) {
345 raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
346 }
347
348 env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
349
350 if (ts) {
351 /* Clear s0 to s31 and the FPSCR */
352 int i;
353
354 for (i = 0; i < 32; i += 2) {
355 *aa32_vfp_dreg(env, i / 2) = 0;
356 }
357 vfp_set_fpscr(env, 0);
358 }
359 /*
360 * Otherwise s0 to s15 and FPSCR are UNKNOWN; we choose to leave them
361 * unchanged.
362 */
363}
364
365/*
366 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
367 * This may change the current stack pointer between Main and Process
368 * stack pointers if it is done for the CONTROL register for the current
369 * security state.
370 */
371static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
372 bool new_spsel,
373 bool secstate)
374{
375 bool old_is_psp = v7m_using_psp(env);
376
377 env->v7m.control[secstate] =
378 deposit32(env->v7m.control[secstate],
379 R_V7M_CONTROL_SPSEL_SHIFT,
380 R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
381
382 if (secstate == env->v7m.secure) {
383 bool new_is_psp = v7m_using_psp(env);
384 uint32_t tmp;
385
386 if (old_is_psp != new_is_psp) {
387 tmp = env->v7m.other_sp;
388 env->v7m.other_sp = env->regs[13];
389 env->regs[13] = tmp;
390 }
391 }
392}
393
394/*
395 * Write to v7M CONTROL.SPSEL bit. This may change the current
396 * stack pointer between Main and Process stack pointers.
397 */
398static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
399{
400 write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
401}
402
403void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
404{
405 /*
406 * Write a new value to v7m.exception, thus transitioning into or out
407 * of Handler mode; this may result in a change of active stack pointer.
408 */
409 bool new_is_psp, old_is_psp = v7m_using_psp(env);
410 uint32_t tmp;
411
412 env->v7m.exception = new_exc;
413
414 new_is_psp = v7m_using_psp(env);
415
416 if (old_is_psp != new_is_psp) {
417 tmp = env->v7m.other_sp;
418 env->v7m.other_sp = env->regs[13];
419 env->regs[13] = tmp;
420 }
421}
422
423/* Switch M profile security state between NS and S */
424static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
425{
426 uint32_t new_ss_msp, new_ss_psp;
427
428 if (env->v7m.secure == new_secstate) {
429 return;
430 }
431
432 /*
433 * All the banked state is accessed by looking at env->v7m.secure
434 * except for the stack pointer; rearrange the SP appropriately.
435 */
436 new_ss_msp = env->v7m.other_ss_msp;
437 new_ss_psp = env->v7m.other_ss_psp;
438
439 if (v7m_using_psp(env)) {
440 env->v7m.other_ss_psp = env->regs[13];
441 env->v7m.other_ss_msp = env->v7m.other_sp;
442 } else {
443 env->v7m.other_ss_msp = env->regs[13];
444 env->v7m.other_ss_psp = env->v7m.other_sp;
445 }
446
447 env->v7m.secure = new_secstate;
448
449 if (v7m_using_psp(env)) {
450 env->regs[13] = new_ss_psp;
451 env->v7m.other_sp = new_ss_msp;
452 } else {
453 env->regs[13] = new_ss_msp;
454 env->v7m.other_sp = new_ss_psp;
455 }
456}
457
458void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
459{
460 /*
461 * Handle v7M BXNS:
462 * - if the return value is a magic value, do exception return (like BX)
463 * - otherwise bit 0 of the return value is the target security state
464 */
465 uint32_t min_magic;
466
467 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
468 /* Covers FNC_RETURN and EXC_RETURN magic */
469 min_magic = FNC_RETURN_MIN_MAGIC;
470 } else {
471 /* EXC_RETURN magic only */
472 min_magic = EXC_RETURN_MIN_MAGIC;
473 }
474
475 if (dest >= min_magic) {
476 /*
477 * This is an exception return magic value; put it where
478 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
479 * Note that if we ever add gen_ss_advance() singlestep support to
480 * M profile this should count as an "instruction execution complete"
481 * event (compare gen_bx_excret_final_code()).
482 */
483 env->regs[15] = dest & ~1;
484 env->thumb = dest & 1;
485 HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
486 /* notreached */
487 }
488
489 /* translate.c should have made BXNS UNDEF unless we're secure */
490 assert(env->v7m.secure);
491
492 if (!(dest & 1)) {
493 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
494 }
495 switch_v7m_security_state(env, dest & 1);
496 env->thumb = 1;
497 env->regs[15] = dest & ~1;
498}
499
500void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
501{
502 /*
503 * Handle v7M BLXNS:
504 * - bit 0 of the destination address is the target security state
505 */
506
507 /* At this point regs[15] is the address just after the BLXNS */
508 uint32_t nextinst = env->regs[15] | 1;
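    /* BLXNS pushes a two-word frame: the return address and the saved partial xPSR */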
509 uint32_t sp = env->regs[13] - 8;
510 uint32_t saved_psr;
511
512 /* translate.c will have made BLXNS UNDEF unless we're secure */
513 assert(env->v7m.secure);
514
515 if (dest & 1) {
516 /*
517 * Target is Secure, so this is just a normal BLX,
518 * except that the low bit doesn't indicate Thumb/not.
519 */
520 env->regs[14] = nextinst;
521 env->thumb = 1;
522 env->regs[15] = dest & ~1;
523 return;
524 }
525
526 /* Target is non-secure: first push a stack frame */
527 if (!QEMU_IS_ALIGNED(sp, 8)) {
528 qemu_log_mask(LOG_GUEST_ERROR,
529 "BLXNS with misaligned SP is UNPREDICTABLE\n");
530 }
531
532 if (sp < v7m_sp_limit(env)) {
533 raise_exception(env, EXCP_STKOF, 0, 1);
534 }
535
536 saved_psr = env->v7m.exception;
537 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
538 saved_psr |= XPSR_SFPA;
539 }
540
541 /* Note that these stores can throw exceptions on MPU faults */
542 cpu_stl_data_ra(env, sp, nextinst, GETPC());
543 cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());
544
545 env->regs[13] = sp;
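    /*
     * LR is set to the FNC_RETURN magic value, so that a later BX to it
     * triggers the secure function-return handling.
     */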
546 env->regs[14] = 0xfeffffff;
547 if (arm_v7m_is_handler_mode(env)) {
548 /*
549 * Write a dummy value to IPSR, to avoid leaking the current secure
550 * exception number to non-secure code. This is guaranteed not
551 * to cause write_v7m_exception() to actually change stacks.
552 */
553 write_v7m_exception(env, 1);
554 }
555 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
556 switch_v7m_security_state(env, 0);
557 env->thumb = 1;
558 env->regs[15] = dest;
559}
560
561static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
562 bool spsel)
563{
564 /*
565 * Return a pointer to the location where we currently store the
566 * stack pointer for the requested security state and thread mode.
567 * This pointer will become invalid if the CPU state is updated
568 * such that the stack pointers are switched around (eg changing
569 * the SPSEL control bit).
570 * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
571 * Unlike that pseudocode, we require the caller to pass us in the
572 * SPSEL control bit value; this is because we also use this
573 * function in handling of pushing of the callee-saves registers
574 * part of the v8M stack frame (pseudocode PushCalleeStack()),
575 * and in the tailchain codepath the SPSEL bit comes from the exception
576 * return magic LR value from the previous exception. The pseudocode
577 * opencodes the stack-selection in PushCalleeStack(), but we prefer
578 * to make this utility function generic enough to do the job.
579 */
580 bool want_psp = threadmode && spsel;
581
582 if (secure == env->v7m.secure) {
583 if (want_psp == v7m_using_psp(env)) {
584 return &env->regs[13];
585 } else {
586 return &env->v7m.other_sp;
587 }
588 } else {
589 if (want_psp) {
590 return &env->v7m.other_ss_psp;
591 } else {
592 return &env->v7m.other_ss_msp;
593 }
594 }
595}
596
597static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
598 uint32_t *pvec)
599{
600 CPUState *cs = CPU(cpu);
601 CPUARMState *env = &cpu->env;
602 MemTxResult result;
603 uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
604 uint32_t vector_entry;
605 MemTxAttrs attrs = {};
606 ARMMMUIdx mmu_idx;
607 bool exc_secure;
608
609 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
610
611 /*
612 * We don't do a get_phys_addr() here because the rules for vector
613 * loads are special: they always use the default memory map, and
614 * the default memory map permits reads from all addresses.
615 * Since there's no easy way to pass through to pmsav8_mpu_lookup()
616 * that we want this special case which would always say "yes",
617 * we just do the SAU lookup here followed by a direct physical load.
618 */
619 attrs.secure = targets_secure;
620 attrs.user = false;
621
622 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
623 V8M_SAttributes sattrs = {};
624
625 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
626 if (sattrs.ns) {
627 attrs.secure = false;
628 } else if (!targets_secure) {
629 /*
630 * NS access to S memory: the underlying exception which we escalate
631 * to HardFault is SecureFault, which always targets Secure.
632 */
633 exc_secure = true;
634 goto load_fail;
635 }
636 }
637
638 vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
639 attrs, &result);
640 if (result != MEMTX_OK) {
641 /*
642 * Underlying exception is BusFault: its target security state
643 * depends on BFHFNMINS.
644 */
645 exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
646 goto load_fail;
647 }
648 *pvec = vector_entry;
649 return true;
650
651load_fail:
652 /*
653 * All vector table fetch fails are reported as HardFault, with
654 * HFSR.VECTTBL and .FORCED set. (FORCED is set because
 655 * technically the underlying exception is a SecureFault or BusFault
656 * that is escalated to HardFault.) This is a terminal exception,
657 * so we will either take the HardFault immediately or else enter
658 * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
659 * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
660 * secure); otherwise it targets the same security state as the
661 * underlying exception.
 662 */
663 if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
664 exc_secure = true;
665 }
666 env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
667 armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
668 return false;
669}
670
671static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
672{
673 /*
674 * Return the integrity signature value for the callee-saves
675 * stack frame section. @lr is the exception return payload/LR value
676 * whose FType bit forms bit 0 of the signature if FP is present.
677 */
678 uint32_t sig = 0xfefa125a;
679
680 if (!arm_feature(env, ARM_FEATURE_VFP) || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
681 sig |= 1;
682 }
683 return sig;
684}
685
686static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
687 bool ignore_faults)
688{
689 /*
690 * For v8M, push the callee-saves register part of the stack frame.
691 * Compare the v8M pseudocode PushCalleeStack().
692 * In the tailchaining case this may not be the current stack.
693 */
694 CPUARMState *env = &cpu->env;
695 uint32_t *frame_sp_p;
696 uint32_t frameptr;
697 ARMMMUIdx mmu_idx;
698 bool stacked_ok;
699 uint32_t limit;
700 bool want_psp;
701 uint32_t sig;
702 StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
703
704 if (dotailchain) {
705 bool mode = lr & R_V7M_EXCRET_MODE_MASK;
706 bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
707 !mode;
708
709 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
710 frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
711 lr & R_V7M_EXCRET_SPSEL_MASK);
712 want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
713 if (want_psp) {
714 limit = env->v7m.psplim[M_REG_S];
715 } else {
716 limit = env->v7m.msplim[M_REG_S];
717 }
718 } else {
719 mmu_idx = arm_mmu_idx(env);
720 frame_sp_p = &env->regs[13];
721 limit = v7m_sp_limit(env);
722 }
723
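    /*
     * The callee-saves frame is 0x28 bytes: the integrity signature at +0x0,
     * a reserved word at +0x4, and r4-r11 at +0x8..+0x24.
     */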
724 frameptr = *frame_sp_p - 0x28;
725 if (frameptr < limit) {
726 /*
727 * Stack limit failure: set SP to the limit value, and generate
728 * STKOF UsageFault. Stack pushes below the limit must not be
729 * performed. It is IMPDEF whether pushes above the limit are
730 * performed; we choose not to.
731 */
732 qemu_log_mask(CPU_LOG_INT,
733 "...STKOF during callee-saves register stacking\n");
734 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
735 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
736 env->v7m.secure);
737 *frame_sp_p = limit;
738 return true;
739 }
740
741 /*
742 * Write as much of the stack frame as we can. A write failure may
743 * cause us to pend a derived exception.
744 */
745 sig = v7m_integrity_sig(env, lr);
746 stacked_ok =
747 v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
748 v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
749 v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
750 v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
751 v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
752 v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
753 v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
754 v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
755 v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
756
757 /* Update SP regardless of whether any of the stack accesses failed. */
758 *frame_sp_p = frameptr;
759
760 return !stacked_ok;
761}
762
763static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
764 bool ignore_stackfaults)
765{
766 /*
767 * Do the "take the exception" parts of exception entry,
768 * but not the pushing of state to the stack. This is
769 * similar to the pseudocode ExceptionTaken() function.
770 */
771 CPUARMState *env = &cpu->env;
772 uint32_t addr;
773 bool targets_secure;
774 int exc;
775 bool push_failed = false;
776
777 armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
778 qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
779 targets_secure ? "secure" : "nonsecure", exc);
780
781 if (dotailchain) {
782 /* Sanitize LR FType and PREFIX bits */
783 if (!arm_feature(env, ARM_FEATURE_VFP)) {
784 lr |= R_V7M_EXCRET_FTYPE_MASK;
785 }
786 lr = deposit32(lr, 24, 8, 0xff);
787 }
788
789 if (arm_feature(env, ARM_FEATURE_V8)) {
790 if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
791 (lr & R_V7M_EXCRET_S_MASK)) {
792 /*
793 * The background code (the owner of the registers in the
794 * exception frame) is Secure. This means it may either already
795 * have or now needs to push callee-saves registers.
796 */
797 if (targets_secure) {
798 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
799 /*
800 * We took an exception from Secure to NonSecure
801 * (which means the callee-saved registers got stacked)
802 * and are now tailchaining to a Secure exception.
803 * Clear DCRS so eventual return from this Secure
804 * exception unstacks the callee-saved registers.
805 */
806 lr &= ~R_V7M_EXCRET_DCRS_MASK;
807 }
808 } else {
809 /*
810 * We're going to a non-secure exception; push the
811 * callee-saves registers to the stack now, if they're
812 * not already saved.
813 */
814 if (lr & R_V7M_EXCRET_DCRS_MASK &&
815 !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
816 push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
817 ignore_stackfaults);
818 }
819 lr |= R_V7M_EXCRET_DCRS_MASK;
820 }
821 }
822
823 lr &= ~R_V7M_EXCRET_ES_MASK;
824 if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
825 lr |= R_V7M_EXCRET_ES_MASK;
826 }
827 lr &= ~R_V7M_EXCRET_SPSEL_MASK;
828 if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
829 lr |= R_V7M_EXCRET_SPSEL_MASK;
830 }
831
832 /*
833 * Clear registers if necessary to prevent non-secure exception
834 * code being able to see register values from secure code.
835 * Where register values become architecturally UNKNOWN we leave
836 * them with their previous values.
837 */
838 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
839 if (!targets_secure) {
840 /*
841 * Always clear the caller-saved registers (they have been
842 * pushed to the stack earlier in v7m_push_stack()).
843 * Clear callee-saved registers if the background code is
844 * Secure (in which case these regs were saved in
845 * v7m_push_callee_stack()).
846 */
847 int i;
848
849 for (i = 0; i < 13; i++) {
850 /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
851 if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
852 env->regs[i] = 0;
853 }
854 }
855 /* Clear EAPSR */
856 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
857 }
858 }
859 }
860
861 if (push_failed && !ignore_stackfaults) {
862 /*
863 * Derived exception on callee-saves register stacking:
864 * we might now want to take a different exception which
865 * targets a different security state, so try again from the top.
866 */
867 qemu_log_mask(CPU_LOG_INT,
868 "...derived exception on callee-saves register stacking");
869 v7m_exception_taken(cpu, lr, true, true);
870 return;
871 }
872
873 if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
874 /* Vector load failed: derived exception */
 875 qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load\n");
876 v7m_exception_taken(cpu, lr, true, true);
877 return;
878 }
879
880 /*
881 * Now we've done everything that might cause a derived exception
882 * we can go ahead and activate whichever exception we're going to
883 * take (which might now be the derived exception).
884 */
885 armv7m_nvic_acknowledge_irq(env->nvic);
886
887 /* Switch to target security state -- must do this before writing SPSEL */
888 switch_v7m_security_state(env, targets_secure);
889 write_v7m_control_spsel(env, 0);
890 arm_clear_exclusive(env);
891 /* Clear SFPA and FPCA (has no effect if no FPU) */
892 env->v7m.control[M_REG_S] &=
893 ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
894 /* Clear IT bits */
895 env->condexec_bits = 0;
896 env->regs[14] = lr;
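    /* Bit 0 of the vector entry is the Thumb bit; M-profile only executes Thumb code */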
897 env->regs[15] = addr & 0xfffffffe;
898 env->thumb = addr & 1;
899}
900
901static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
902 bool apply_splim)
903{
904 /*
905 * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
906 * that we will need later in order to do lazy FP reg stacking.
907 */
908 bool is_secure = env->v7m.secure;
909 void *nvic = env->nvic;
910 /*
911 * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
912 * are banked and we want to update the bit in the bank for the
913 * current security state; and in one case we want to specifically
914 * update the NS banked version of a bit even if we are secure.
915 */
916 uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
917 uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
918 uint32_t *fpccr = &env->v7m.fpccr[is_secure];
919 bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
920
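    /* FPCAR holds the 8-byte-aligned address where the FP state would be stacked */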
921 env->v7m.fpcar[is_secure] = frameptr & ~0x7;
922
923 if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
924 bool splimviol;
925 uint32_t splim = v7m_sp_limit(env);
926 bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
927 (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
928
929 splimviol = !ign && frameptr < splim;
930 *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
931 }
932
933 *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
934
935 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
936
937 *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
938
939 *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
940 !arm_v7m_is_handler_mode(env));
941
942 hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
943 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
944
945 bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
946 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
947
948 mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
949 *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
950
951 ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
952 *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
953
954 monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
955 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
956
957 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
958 s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
959 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
960
961 sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
962 *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
963 }
964}
965
966void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
967{
968 /* fptr is the value of Rn, the frame pointer we store the FP regs to */
969 bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
970 bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
 971 uintptr_t ra = GETPC();
972
973 assert(env->v7m.secure);
974
975 if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
976 return;
977 }
978
979 /* Check access to the coprocessor is permitted */
980 if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
981 raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
982 }
983
984 if (lspact) {
985 /* LSPACT should not be active when there is active FP state */
986 raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
987 }
988
989 if (fptr & 7) {
990 raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
991 }
992
993 /*
994 * Note that we do not use v7m_stack_write() here, because the
995 * accesses should not set the FSR bits for stacking errors if they
996 * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
 997 * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
998 * and longjmp out.
999 */
1000 if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
1001 bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1002 int i;
1003
1004 for (i = 0; i < (ts ? 32 : 16); i += 2) {
1005 uint64_t dn = *aa32_vfp_dreg(env, i / 2);
1006 uint32_t faddr = fptr + 4 * i;
1007 uint32_t slo = extract64(dn, 0, 32);
1008 uint32_t shi = extract64(dn, 32, 32);
1009
1010 if (i >= 16) {
1011 faddr += 8; /* skip the slot for the FPSCR */
1012 }
1013 cpu_stl_data_ra(env, faddr, slo, ra);
1014 cpu_stl_data_ra(env, faddr + 4, shi, ra);
 1015 }
 1016 cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
1017
1018 /*
1019 * If TS is 0 then s0 to s15 and FPSCR are UNKNOWN; we choose to
1020 * leave them unchanged, matching our choice in v7m_preserve_fp_state.
1021 */
1022 if (ts) {
1023 for (i = 0; i < 32; i += 2) {
1024 *aa32_vfp_dreg(env, i / 2) = 0;
1025 }
1026 vfp_set_fpscr(env, 0);
1027 }
1028 } else {
1029 v7m_update_fpccr(env, fptr, false);
1030 }
1031
1032 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
1033}
1034
1035void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
1036{
1037 uintptr_t ra = GETPC();
1038
1039 /* fptr is the value of Rn, the frame pointer we load the FP regs from */
1040 assert(env->v7m.secure);
1041
1042 if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1043 return;
1044 }
1045
1046 /* Check access to the coprocessor is permitted */
1047 if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
1048 raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
1049 }
1050
1051 if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1052 /* State in FP is still valid */
1053 env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
1054 } else {
1055 bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1056 int i;
1057 uint32_t fpscr;
1058
1059 if (fptr & 7) {
1060 raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
1061 }
1062
1063 for (i = 0; i < (ts ? 32 : 16); i += 2) {
1064 uint32_t slo, shi;
1065 uint64_t dn;
1066 uint32_t faddr = fptr + 4 * i;
1067
1068 if (i >= 16) {
1069 faddr += 8; /* skip the slot for the FPSCR */
1070 }
1071
1072 slo = cpu_ldl_data_ra(env, faddr, ra);
1073 shi = cpu_ldl_data_ra(env, faddr + 4, ra);
1074
1075 dn = (uint64_t) shi << 32 | slo;
1076 *aa32_vfp_dreg(env, i / 2) = dn;
1077 }
 1078 fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
1079 vfp_set_fpscr(env, fpscr);
1080 }
1081
1082 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
1083}
1084
1085static bool v7m_push_stack(ARMCPU *cpu)
1086{
1087 /*
1088 * Do the "set up stack frame" part of exception entry,
1089 * similar to pseudocode PushStack().
1090 * Return true if we generate a derived exception (and so
1091 * should ignore further stack faults trying to process
1092 * that derived exception.)
1093 */
1094 bool stacked_ok = true, limitviol = false;
1095 CPUARMState *env = &cpu->env;
1096 uint32_t xpsr = xpsr_read(env);
1097 uint32_t frameptr = env->regs[13];
1098 ARMMMUIdx mmu_idx = arm_mmu_idx(env);
1099 uint32_t framesize;
1100 bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
1101
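    /*
     * Pick the frame size: 0x20 bytes for the basic 8-word frame, 0x68 when
     * FP state (s0-s15, FPSCR and a reserved word) must also be stacked, and
     * 0xa8 when TS additionally requires space for s16-s31.
     */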
1102 if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
1103 (env->v7m.secure || nsacr_cp10)) {
1104 if (env->v7m.secure &&
1105 env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
1106 framesize = 0xa8;
1107 } else {
1108 framesize = 0x68;
1109 }
1110 } else {
1111 framesize = 0x20;
1112 }
1113
1114 /* Align stack pointer if the guest wants that */
1115 if ((frameptr & 4) &&
1116 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
1117 frameptr -= 4;
1118 xpsr |= XPSR_SPREALIGN;
1119 }
1120
1121 xpsr &= ~XPSR_SFPA;
1122 if (env->v7m.secure &&
1123 (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1124 xpsr |= XPSR_SFPA;
1125 }
1126
1127 frameptr -= framesize;
1128
1129 if (arm_feature(env, ARM_FEATURE_V8)) {
1130 uint32_t limit = v7m_sp_limit(env);
1131
1132 if (frameptr < limit) {
1133 /*
1134 * Stack limit failure: set SP to the limit value, and generate
1135 * STKOF UsageFault. Stack pushes below the limit must not be
1136 * performed. It is IMPDEF whether pushes above the limit are
1137 * performed; we choose not to.
1138 */
1139 qemu_log_mask(CPU_LOG_INT,
1140 "...STKOF during stacking\n");
1141 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
1142 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1143 env->v7m.secure);
1144 env->regs[13] = limit;
1145 /*
1146 * We won't try to perform any further memory accesses but
1147 * we must continue through the following code to check for
1148 * permission faults during FPU state preservation, and we
1149 * must update FPCCR if lazy stacking is enabled.
1150 */
1151 limitviol = true;
1152 stacked_ok = false;
1153 }
1154 }
1155
1156 /*
1157 * Write as much of the stack frame as we can. If we fail a stack
1158 * write this will result in a derived exception being pended
1159 * (which may be taken in preference to the one we started with
1160 * if it has higher priority).
1161 */
1162 stacked_ok = stacked_ok &&
1163 v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
1164 v7m_stack_write(cpu, frameptr + 4, env->regs[1],
1165 mmu_idx, STACK_NORMAL) &&
1166 v7m_stack_write(cpu, frameptr + 8, env->regs[2],
1167 mmu_idx, STACK_NORMAL) &&
1168 v7m_stack_write(cpu, frameptr + 12, env->regs[3],
1169 mmu_idx, STACK_NORMAL) &&
1170 v7m_stack_write(cpu, frameptr + 16, env->regs[12],
1171 mmu_idx, STACK_NORMAL) &&
1172 v7m_stack_write(cpu, frameptr + 20, env->regs[14],
1173 mmu_idx, STACK_NORMAL) &&
1174 v7m_stack_write(cpu, frameptr + 24, env->regs[15],
1175 mmu_idx, STACK_NORMAL) &&
1176 v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
1177
1178 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
1179 /* FPU is active, try to save its registers */
1180 bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
1181 bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
1182
1183 if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1184 qemu_log_mask(CPU_LOG_INT,
1185 "...SecureFault because LSPACT and FPCA both set\n");
1186 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1187 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1188 } else if (!env->v7m.secure && !nsacr_cp10) {
1189 qemu_log_mask(CPU_LOG_INT,
1190 "...Secure UsageFault with CFSR.NOCP because "
1191 "NSACR.CP10 prevents stacking FP regs\n");
1192 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
1193 env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1194 } else {
1195 if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
1196 /* Lazy stacking disabled, save registers now */
1197 int i;
1198 bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
1199 arm_current_el(env) != 0);
1200
1201 if (stacked_ok && !cpacr_pass) {
1202 /*
1203 * Take UsageFault if CPACR forbids access. The pseudocode
1204 * here does a full CheckCPEnabled() but we know the NSACR
1205 * check can never fail as we have already handled that.
1206 */
1207 qemu_log_mask(CPU_LOG_INT,
1208 "...UsageFault with CFSR.NOCP because "
1209 "CPACR.CP10 prevents stacking FP regs\n");
1210 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1211 env->v7m.secure);
1212 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
1213 stacked_ok = false;
1214 }
1215
1216 for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
1217 uint64_t dn = *aa32_vfp_dreg(env, i / 2);
1218 uint32_t faddr = frameptr + 0x20 + 4 * i;
1219 uint32_t slo = extract64(dn, 0, 32);
1220 uint32_t shi = extract64(dn, 32, 32);
1221
1222 if (i >= 16) {
1223 faddr += 8; /* skip the slot for the FPSCR */
1224 }
1225 stacked_ok = stacked_ok &&
1226 v7m_stack_write(cpu, faddr, slo,
1227 mmu_idx, STACK_NORMAL) &&
1228 v7m_stack_write(cpu, faddr + 4, shi,
1229 mmu_idx, STACK_NORMAL);
1230 }
1231 stacked_ok = stacked_ok &&
1232 v7m_stack_write(cpu, frameptr + 0x60,
1233 vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
1234 if (cpacr_pass) {
1235 for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
1236 *aa32_vfp_dreg(env, i / 2) = 0;
1237 }
1238 vfp_set_fpscr(env, 0);
1239 }
1240 } else {
1241 /* Lazy stacking enabled, save necessary info to stack later */
1242 v7m_update_fpccr(env, frameptr + 0x20, true);
1243 }
1244 }
1245 }
1246
1247 /*
1248 * If we broke a stack limit then SP was already updated earlier;
1249 * otherwise we update SP regardless of whether any of the stack
1250 * accesses failed or we took some other kind of fault.
1251 */
1252 if (!limitviol) {
1253 env->regs[13] = frameptr;
1254 }
1255
1256 return !stacked_ok;
1257}
1258
1259static void do_v7m_exception_exit(ARMCPU *cpu)
1260{
1261 CPUARMState *env = &cpu->env;
1262 uint32_t excret;
1263 uint32_t xpsr, xpsr_mask;
1264 bool ufault = false;
1265 bool sfault = false;
1266 bool return_to_sp_process;
1267 bool return_to_handler;
1268 bool rettobase = false;
1269 bool exc_secure = false;
1270 bool return_to_secure;
1271 bool ftype;
1272 bool restore_s16_s31;
1273
1274 /*
1275 * If we're not in Handler mode then jumps to magic exception-exit
1276 * addresses don't have magic behaviour. However for the v8M
1277 * security extensions the magic secure-function-return has to
1278 * work in thread mode too, so to avoid doing an extra check in
1279 * the generated code we allow exception-exit magic to also cause the
1280 * internal exception and bring us here in thread mode. Correct code
1281 * will never try to do this (the following insn fetch will always
 1282 * fault) so the overhead of having taken an unnecessary exception
1283 * doesn't matter.
1284 */
1285 if (!arm_v7m_is_handler_mode(env)) {
1286 return;
1287 }
1288
1289 /*
1290 * In the spec pseudocode ExceptionReturn() is called directly
1291 * from BXWritePC() and gets the full target PC value including
1292 * bit zero. In QEMU's implementation we treat it as a normal
1293 * jump-to-register (which is then caught later on), and so split
1294 * the target value up between env->regs[15] and env->thumb in
1295 * gen_bx(). Reconstitute it.
1296 */
1297 excret = env->regs[15];
1298 if (env->thumb) {
1299 excret |= 1;
1300 }
1301
1302 qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
1303 " previous exception %d\n",
1304 excret, env->v7m.exception);
1305
1306 if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
1307 qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
1308 "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
1309 excret);
1310 }
1311
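    /* EXCRET.FType == 1 means the stacked frame contains no FP state */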
1312 ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
1313
1314 if (!arm_feature(env, ARM_FEATURE_VFP) && !ftype) {
1315 qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
1316 "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
1317 "if FPU not present\n",
1318 excret);
1319 ftype = true;
1320 }
1321
1322 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1323 /*
1324 * EXC_RETURN.ES validation check (R_SMFL). We must do this before
1325 * we pick which FAULTMASK to clear.
1326 */
1327 if (!env->v7m.secure &&
1328 ((excret & R_V7M_EXCRET_ES_MASK) ||
1329 !(excret & R_V7M_EXCRET_DCRS_MASK))) {
1330 sfault = 1;
1331 /* For all other purposes, treat ES as 0 (R_HXSR) */
1332 excret &= ~R_V7M_EXCRET_ES_MASK;
1333 }
1334 exc_secure = excret & R_V7M_EXCRET_ES_MASK;
1335 }
1336
1337 if (env->v7m.exception != ARMV7M_EXCP_NMI) {
1338 /*
1339 * Auto-clear FAULTMASK on return from other than NMI.
1340 * If the security extension is implemented then this only
1341 * happens if the raw execution priority is >= 0; the
1342 * value of the ES bit in the exception return value indicates
1343 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1344 */
1345 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1346 if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
1347 env->v7m.faultmask[exc_secure] = 0;
1348 }
1349 } else {
1350 env->v7m.faultmask[M_REG_NS] = 0;
1351 }
1352 }
1353
1354 switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
1355 exc_secure)) {
1356 case -1:
1357 /* attempt to exit an exception that isn't active */
1358 ufault = true;
1359 break;
1360 case 0:
1361 /* still an irq active now */
1362 break;
1363 case 1:
1364 /*
1365 * We returned to base exception level, no nesting.
1366 * (In the pseudocode this is written using "NestedActivation != 1"
1367 * where we have 'rettobase == false'.)
1368 */
1369 rettobase = true;
1370 break;
1371 default:
1372 g_assert_not_reached();
1373 }
1374
1375 return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1376 return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1377 return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1378 (excret & R_V7M_EXCRET_S_MASK);
1379
1380 if (arm_feature(env, ARM_FEATURE_V8)) {
1381 if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1382 /*
1383 * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1384 * we choose to take the UsageFault.
1385 */
1386 if ((excret & R_V7M_EXCRET_S_MASK) ||
1387 (excret & R_V7M_EXCRET_ES_MASK) ||
1388 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1389 ufault = true;
1390 }
1391 }
1392 if (excret & R_V7M_EXCRET_RES0_MASK) {
1393 ufault = true;
1394 }
1395 } else {
1396 /* For v7M we only recognize certain combinations of the low bits */
1397 switch (excret & 0xf) {
1398 case 1: /* Return to Handler */
1399 break;
1400 case 13: /* Return to Thread using Process stack */
1401 case 9: /* Return to Thread using Main stack */
1402 /*
1403 * We only need to check NONBASETHRDENA for v7M, because in
1404 * v8M this bit does not exist (it is RES1).
1405 */
1406 if (!rettobase &&
1407 !(env->v7m.ccr[env->v7m.secure] &
1408 R_V7M_CCR_NONBASETHRDENA_MASK)) {
1409 ufault = true;
1410 }
1411 break;
1412 default:
1413 ufault = true;
1414 }
1415 }
1416
1417 /*
1418 * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1419 * Handler mode (and will be until we write the new XPSR.Interrupt
1420 * field) this does not switch around the current stack pointer.
1421 * We must do this before we do any kind of tailchaining, including
1422 * for the derived exceptions on integrity check failures, or we will
1423 * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1424 */
1425 write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1426
1427 /*
1428 * Clear scratch FP values left in caller saved registers; this
1429 * must happen before any kind of tail chaining.
1430 */
1431 if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1432 (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1433 if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1434 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1435 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1436 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1437 "stackframe: error during lazy state deactivation\n");
1438 v7m_exception_taken(cpu, excret, true, false);
1439 return;
1440 } else {
1441 /* Clear s0..s15 and FPSCR */
1442 int i;
1443
1444 for (i = 0; i < 16; i += 2) {
1445 *aa32_vfp_dreg(env, i / 2) = 0;
1446 }
1447 vfp_set_fpscr(env, 0);
1448 }
1449 }
1450
1451 if (sfault) {
1452 env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1453 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1454 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1455 "stackframe: failed EXC_RETURN.ES validity check\n");
1456 v7m_exception_taken(cpu, excret, true, false);
1457 return;
1458 }
1459
1460 if (ufault) {
1461 /*
1462 * Bad exception return: instead of popping the exception
1463 * stack, directly take a usage fault on the current stack.
1464 */
1465 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1466 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1467 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1468 "stackframe: failed exception return integrity check\n");
1469 v7m_exception_taken(cpu, excret, true, false);
1470 return;
1471 }
1472
1473 /*
1474 * Tailchaining: if there is currently a pending exception that
1475 * is high enough priority to preempt execution at the level we're
1476 * about to return to, then just directly take that exception now,
1477 * avoiding an unstack-and-then-stack. Note that now we have
1478 * deactivated the previous exception by calling armv7m_nvic_complete_irq()
1479 * our current execution priority is already the execution priority we are
1480 * returning to -- none of the state we would unstack or set based on
1481 * the EXCRET value affects it.
1482 */
1483 if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1484 qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1485 v7m_exception_taken(cpu, excret, true, false);
1486 return;
1487 }
1488
1489 switch_v7m_security_state(env, return_to_secure);
1490
1491 {
1492 /*
1493 * The stack pointer we should be reading the exception frame from
1494 * depends on bits in the magic exception return type value (and
1495 * for v8M isn't necessarily the stack pointer we will eventually
1496 * end up resuming execution with). Get a pointer to the location
1497 * in the CPU state struct where the SP we need is currently being
1498 * stored; we will use and modify it in place.
1499 * We use this limited C variable scope so we don't accidentally
1500 * use 'frame_sp_p' after we do something that makes it invalid.
1501 */
1502 uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
1503 return_to_secure,
1504 !return_to_handler,
1505 return_to_sp_process);
1506 uint32_t frameptr = *frame_sp_p;
1507 bool pop_ok = true;
1508 ARMMMUIdx mmu_idx;
1509 bool return_to_priv = return_to_handler ||
1510 !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1511
1512 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1513 return_to_priv);
1514
1515 if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1516 arm_feature(env, ARM_FEATURE_V8)) {
1517 qemu_log_mask(LOG_GUEST_ERROR,
1518 "M profile exception return with non-8-aligned SP "
1519 "for destination state is UNPREDICTABLE\n");
1520 }
1521
1522 /* Do we need to pop callee-saved registers? */
1523 if (return_to_secure &&
1524 ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1525 (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1526 uint32_t actual_sig;
1527
1528 pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1529
1530 if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1531 /* Take a SecureFault on the current stack */
1532 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1533 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1534 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1535 "stackframe: failed exception return integrity "
1536 "signature check\n");
1537 v7m_exception_taken(cpu, excret, true, false);
1538 return;
1539 }
1540
1541 pop_ok = pop_ok &&
1542 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1543 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1544 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1545 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1546 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1547 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1548 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1549 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1550
1551 frameptr += 0x28;
1552 }
1553
1554 /* Pop registers */
1555 pop_ok = pop_ok &&
1556 v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1557 v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1558 v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1559 v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1560 v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1561 v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1562 v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1563 v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
1564
1565 if (!pop_ok) {
1566 /*
1567 * v7m_stack_read() pended a fault, so take it (as a tail
1568 * chained exception on the same stack frame)
1569 */
1570 qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1571 v7m_exception_taken(cpu, excret, true, false);
1572 return;
1573 }
1574
1575 /*
1576 * Returning from an exception with a PC with bit 0 set is defined
1577 * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1578 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1579 * the lsbit, and there are several RTOSes out there which incorrectly
1580 * assume the r15 in the stack frame should be a Thumb-style "lsbit
1581 * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1582 * complain about the badly behaved guest.
1583 */
1584 if (env->regs[15] & 1) {
1585 env->regs[15] &= ~1U;
1586 if (!arm_feature(env, ARM_FEATURE_V8)) {
1587 qemu_log_mask(LOG_GUEST_ERROR,
1588 "M profile return from interrupt with misaligned "
1589 "PC is UNPREDICTABLE on v7M\n");
1590 }
1591 }
1592
1593 if (arm_feature(env, ARM_FEATURE_V8)) {
1594 /*
1595 * For v8M we have to check whether the xPSR exception field
1596 * matches the EXCRET value for return to handler/thread
1597 * before we commit to changing the SP and xPSR.
1598 */
1599 bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1600 if (return_to_handler != will_be_handler) {
1601 /*
1602 * Take an INVPC UsageFault on the current stack.
1603 * By this point we will have switched to the security state
1604 * for the background state, so this UsageFault will target
1605 * that state.
1606 */
1607 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1608 env->v7m.secure);
1609 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1610 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1611 "stackframe: failed exception return integrity "
1612 "check\n");
1613 v7m_exception_taken(cpu, excret, true, false);
1614 return;
1615 }
1616 }
1617
1618 if (!ftype) {
1619 /* FP present and we need to handle it */
1620 if (!return_to_secure &&
1621 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1622 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1623 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1624 qemu_log_mask(CPU_LOG_INT,
1625 "...taking SecureFault on existing stackframe: "
1626 "Secure LSPACT set but exception return is "
1627 "not to secure state\n");
1628 v7m_exception_taken(cpu, excret, true, false);
1629 return;
1630 }
1631
1632 restore_s16_s31 = return_to_secure &&
1633 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1634
1635 if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1636 /* State in FPU is still valid, just clear LSPACT */
1637 env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1638 } else {
1639 int i;
1640 uint32_t fpscr;
1641 bool cpacr_pass, nsacr_pass;
1642
1643 cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1644 return_to_priv);
1645 nsacr_pass = return_to_secure ||
1646 extract32(env->v7m.nsacr, 10, 1);
1647
1648 if (!cpacr_pass) {
1649 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1650 return_to_secure);
1651 env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1652 qemu_log_mask(CPU_LOG_INT,
1653 "...taking UsageFault on existing "
1654 "stackframe: CPACR.CP10 prevents unstacking "
1655 "FP regs\n");
1656 v7m_exception_taken(cpu, excret, true, false);
1657 return;
1658 } else if (!nsacr_pass) {
1659 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1660 env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1661 qemu_log_mask(CPU_LOG_INT,
1662 "...taking Secure UsageFault on existing "
1663 "stackframe: NSACR.CP10 prevents unstacking "
1664 "FP regs\n");
1665 v7m_exception_taken(cpu, excret, true, false);
1666 return;
1667 }
1668
1669 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1670 uint32_t slo, shi;
1671 uint64_t dn;
1672 uint32_t faddr = frameptr + 0x20 + 4 * i;
1673
1674 if (i >= 16) {
1675 faddr += 8; /* Skip the slot for the FPSCR */
1676 }
1677
1678 pop_ok = pop_ok &&
1679 v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1680 v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1681
1682 if (!pop_ok) {
1683 break;
1684 }
1685
1686 dn = (uint64_t)shi << 32 | slo;
1687 *aa32_vfp_dreg(env, i / 2) = dn;
1688 }
1689 pop_ok = pop_ok &&
1690 v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1691 if (pop_ok) {
1692 vfp_set_fpscr(env, fpscr);
1693 }
1694 if (!pop_ok) {
1695 /*
 1696 * These regs are 0 if the security extension is present;
 1697 * otherwise they are merely UNKNOWN. We always zero them.
1698 */
1699 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1700 *aa32_vfp_dreg(env, i / 2) = 0;
1701 }
1702 vfp_set_fpscr(env, 0);
1703 }
1704 }
1705 }
1706 env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1707 V7M_CONTROL, FPCA, !ftype);
1708
1709 /* Commit to consuming the stack frame */
1710 frameptr += 0x20;
1711 if (!ftype) {
1712 frameptr += 0x48;
1713 if (restore_s16_s31) {
1714 frameptr += 0x40;
1715 }
1716 }
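/*
 * These increments match the frame layout assumed by the unstacking
 * code above: the basic frame of eight words (r0-r3, r12, lr, pc, xPSR)
 * is 0x20 bytes; the FP extension adds s0-s15, FPSCR and a reserved
 * word (0x48 bytes, with FPSCR at offset 0x60 from the frame base);
 * when FPCCR_S.TS is set there is a further 0x40 bytes for s16-s31.
 */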
 1717 /*
 1718 * Undo stack alignment: the SPREALIGN bit indicates that the original
 1719 * pre-exception SP was not 8-aligned and we added a padding word to
 1720 * align it, so we undo this by ORing in the bit that increases it
 1721 * from the current 8-aligned value to the 8-unaligned value. (Adding 4
 1722 * would work too, but a logical OR is how the pseudocode specifies it.)
 1723 */
1724 if (xpsr & XPSR_SPREALIGN) {
1725 frameptr |= 4;
1726 }
1727 *frame_sp_p = frameptr;
1728 }
1729
1730 xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1731 if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1732 xpsr_mask &= ~XPSR_GE;
1733 }
1734 /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1735 xpsr_write(env, xpsr, xpsr_mask);
1736
1737 if (env->v7m.secure) {
1738 bool sfpa = xpsr & XPSR_SFPA;
1739
1740 env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1741 V7M_CONTROL, SFPA, sfpa);
1742 }
1743
1744 /*
1745 * The restored xPSR exception field will be zero if we're
1746 * resuming in Thread mode. If that doesn't match what the
1747 * exception return excret specified then this is a UsageFault.
1748 * v7M requires we make this check here; v8M did it earlier.
1749 */
1750 if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1751 /*
1752 * Take an INVPC UsageFault by pushing the stack again;
1753 * we know we're v7M so this is never a Secure UsageFault.
1754 */
1755 bool ignore_stackfaults;
1756
1757 assert(!arm_feature(env, ARM_FEATURE_V8));
1758 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1759 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1760 ignore_stackfaults = v7m_push_stack(cpu);
1761 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1762 "failed exception return integrity check\n");
1763 v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1764 return;
1765 }
1766
1767 /* Otherwise, we have a successful exception exit. */
1768 arm_clear_exclusive(env);
1769 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1770}
1771
1772static bool do_v7m_function_return(ARMCPU *cpu)
1773{
1774 /*
1775 * v8M security extensions magic function return.
1776 * We may either:
1777 * (1) throw an exception (longjump)
1778 * (2) return true if we successfully handled the function return
1779 * (3) return false if we failed a consistency check and have
1780 * pended a UsageFault that needs to be taken now
1781 *
1782 * At this point the magic return value is split between env->regs[15]
1783 * and env->thumb. We don't bother to reconstitute it because we don't
1784 * need it (all values are handled the same way).
1785 */
1786 CPUARMState *env = &cpu->env;
1787 uint32_t newpc, newpsr, newpsr_exc;
1788
1789 qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1790
1791 {
1792 bool threadmode, spsel;
1793 TCGMemOpIdx oi;
1794 ARMMMUIdx mmu_idx;
1795 uint32_t *frame_sp_p;
1796 uint32_t frameptr;
1797
1798 /* Pull the return address and IPSR from the Secure stack */
1799 threadmode = !arm_v7m_is_handler_mode(env);
1800 spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1801
1802 frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
1803 frameptr = *frame_sp_p;
1804
1805 /*
1806 * These loads may throw an exception (for MPU faults). We want to
1807 * do them as secure, so work out what MMU index that is.
1808 */
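/*
 * The Secure function-return frame read here is two words: the return
 * address at [frameptr] and the saved PSR value (of which only the
 * exception field and SFPA are used below) at [frameptr + 4]; the
 * "+ 8" further down pops it.
 */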
1809 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1810 oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
1811 newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
1812 newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
1813
1814 /* Consistency checks on new IPSR */
1815 newpsr_exc = newpsr & XPSR_EXCP;
1816 if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1817 (env->v7m.exception == 1 && newpsr_exc != 0))) {
1818 /* Pend the fault and tell our caller to take it */
1819 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1820 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1821 env->v7m.secure);
1822 qemu_log_mask(CPU_LOG_INT,
1823 "...taking INVPC UsageFault: "
1824 "IPSR consistency check failed\n");
1825 return false;
1826 }
1827
1828 *frame_sp_p = frameptr + 8;
1829 }
1830
1831 /* This invalidates frame_sp_p */
1832 switch_v7m_security_state(env, true);
1833 env->v7m.exception = newpsr_exc;
1834 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1835 if (newpsr & XPSR_SFPA) {
1836 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
1837 }
1838 xpsr_write(env, 0, XPSR_IT);
1839 env->thumb = newpc & 1;
1840 env->regs[15] = newpc & ~1;
1841
1842 qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
1843 return true;
1844}
1845
1846static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1847 uint32_t addr, uint16_t *insn)
1848{
1849 /*
1850 * Load a 16-bit portion of a v7M instruction, returning true on success,
1851 * or false on failure (in which case we will have pended the appropriate
1852 * exception).
1853 * We need to do the instruction fetch's MPU and SAU checks
1854 * like this because there is no MMU index that would allow
1855 * doing the load with a single function call. Instead we must
1856 * first check that the security attributes permit the load
1857 * and that they don't mismatch on the two halves of the instruction,
1858 * and then we do the load as a secure load (ie using the security
1859 * attributes of the address, not the CPU, as architecturally required).
1860 */
1861 CPUState *cs = CPU(cpu);
1862 CPUARMState *env = &cpu->env;
1863 V8M_SAttributes sattrs = {};
1864 MemTxAttrs attrs = {};
1865 ARMMMUFaultInfo fi = {};
1866 MemTxResult txres;
1867 target_ulong page_size;
1868 hwaddr physaddr;
1869 int prot;
1870
1871 v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
1872 if (!sattrs.nsc || sattrs.ns) {
1873 /*
1874 * This must be the second half of the insn, and it straddles a
1875 * region boundary with the second half not being S&NSC.
1876 */
1877 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
1878 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1879 qemu_log_mask(CPU_LOG_INT,
1880 "...really SecureFault with SFSR.INVEP\n");
1881 return false;
1882 }
1883 if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
1884 &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
1885 /* the MPU lookup failed */
1886 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
1887 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
1888 qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
1889 return false;
1890 }
1891 *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
1892 attrs, &txres);
1893 if (txres != MEMTX_OK) {
1894 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
1895 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
1896 qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
1897 return false;
1898 }
1899 return true;
1900}
1901
1902static bool v7m_handle_execute_nsc(ARMCPU *cpu)
1903{
1904 /*
1905 * Check whether this attempt to execute code in a Secure & NS-Callable
1906 * memory region is for an SG instruction; if so, then emulate the
1907 * effect of the SG instruction and return true. Otherwise pend
1908 * the correct kind of exception and return false.
1909 */
1910 CPUARMState *env = &cpu->env;
1911 ARMMMUIdx mmu_idx;
1912 uint16_t insn;
1913
1914 /*
1915 * We should never get here unless get_phys_addr_pmsav8() caused
1916 * an exception for NS executing in S&NSC memory.
1917 */
1918 assert(!env->v7m.secure);
1919 assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
1920
1921 /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
1922 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1923
1924 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
1925 return false;
1926 }
1927
1928 if (!env->thumb) {
1929 goto gen_invep;
1930 }
1931
1932 if (insn != 0xe97f) {
1933 /*
1934 * Not an SG instruction first half (we choose the IMPDEF
1935 * early-SG-check option).
1936 */
1937 goto gen_invep;
1938 }
1939
1940 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
1941 return false;
1942 }
1943
1944 if (insn != 0xe97f) {
1945 /*
1946 * Not an SG instruction second half (yes, both halves of the SG
1947 * insn have the same hex value)
1948 */
1949 goto gen_invep;
1950 }
1951
1952 /*
1953 * OK, we have confirmed that we really have an SG instruction.
1954 * We know we're NS in S memory so don't need to repeat those checks.
1955 */
1956 qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
1957 ", executing it\n", env->regs[15]);
1958 env->regs[14] &= ~1;
1959 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1960 switch_v7m_security_state(env, true);
1961 xpsr_write(env, 0, XPSR_IT);
1962 env->regs[15] += 4;
1963 return true;
1964
1965gen_invep:
1966 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
1967 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1968 qemu_log_mask(CPU_LOG_INT,
1969 "...really SecureFault with SFSR.INVEP\n");
1970 return false;
1971}
1972
1973void arm_v7m_cpu_do_interrupt(CPUState *cs)
1974{
1975 ARMCPU *cpu = ARM_CPU(cs);
1976 CPUARMState *env = &cpu->env;
1977 uint32_t lr;
1978 bool ignore_stackfaults;
1979
1980 arm_log_exception(cs->exception_index);
1981
1982 /*
1983 * For exceptions we just mark as pending on the NVIC, and let that
1984 * handle it.
1985 */
1986 switch (cs->exception_index) {
1987 case EXCP_UDEF:
1988 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1989 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
1990 break;
1991 case EXCP_NOCP:
1992 {
1993 /*
1994 * NOCP might be directed to something other than the current
1995 * security state if this fault is because of NSACR; we indicate
1996 * the target security state using exception.target_el.
1997 */
1998 int target_secstate;
1999
2000 if (env->exception.target_el == 3) {
2001 target_secstate = M_REG_S;
2002 } else {
2003 target_secstate = env->v7m.secure;
2004 }
2005 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
2006 env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
2007 break;
2008 }
2009 case EXCP_INVSTATE:
2010 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2011 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
2012 break;
2013 case EXCP_STKOF:
2014 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2015 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2016 break;
2017 case EXCP_LSERR:
2018 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2019 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2020 break;
2021 case EXCP_UNALIGNED:
2022 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2023 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2024 break;
2025 case EXCP_SWI:
2026 /* The PC already points to the next instruction. */
2027 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2028 break;
2029 case EXCP_PREFETCH_ABORT:
2030 case EXCP_DATA_ABORT:
2031 /*
2032 * Note that for M profile we don't have a guest facing FSR, but
2033 * the env->exception.fsr will be populated by the code that
2034 * raises the fault, in the A profile short-descriptor format.
2035 */
2036 switch (env->exception.fsr & 0xf) {
2037 case M_FAKE_FSR_NSC_EXEC:
2038 /*
2039 * Exception generated when we try to execute code at an address
2040 * which is marked as Secure & Non-Secure Callable and the CPU
2041 * is in the Non-Secure state. The only instruction which can
2042 * be executed like this is SG (and that only if both halves of
2043 * the SG instruction have the same security attributes.)
2044 * Everything else must generate an INVEP SecureFault, so we
2045 * emulate the SG instruction here.
2046 */
2047 if (v7m_handle_execute_nsc(cpu)) {
2048 return;
2049 }
2050 break;
2051 case M_FAKE_FSR_SFAULT:
2052 /*
2053 * Various flavours of SecureFault for attempts to execute or
2054 * access data in the wrong security state.
2055 */
2056 switch (cs->exception_index) {
2057 case EXCP_PREFETCH_ABORT:
2058 if (env->v7m.secure) {
2059 env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2060 qemu_log_mask(CPU_LOG_INT,
2061 "...really SecureFault with SFSR.INVTRAN\n");
2062 } else {
2063 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2064 qemu_log_mask(CPU_LOG_INT,
2065 "...really SecureFault with SFSR.INVEP\n");
2066 }
2067 break;
2068 case EXCP_DATA_ABORT:
2069 /* This must be an NS access to S memory */
2070 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2071 qemu_log_mask(CPU_LOG_INT,
2072 "...really SecureFault with SFSR.AUVIOL\n");
2073 break;
2074 }
2075 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2076 break;
2077 case 0x8: /* External Abort */
2078 switch (cs->exception_index) {
2079 case EXCP_PREFETCH_ABORT:
2080 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2081 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2082 break;
2083 case EXCP_DATA_ABORT:
2084 env->v7m.cfsr[M_REG_NS] |=
2085 (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2086 env->v7m.bfar = env->exception.vaddress;
2087 qemu_log_mask(CPU_LOG_INT,
2088 "...with CFSR.PRECISERR and BFAR 0x%x\n",
2089 env->v7m.bfar);
2090 break;
2091 }
2092 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2093 break;
2094 default:
2095 /*
2096 * All other FSR values are either MPU faults or "can't happen
2097 * for M profile" cases.
2098 */
2099 switch (cs->exception_index) {
2100 case EXCP_PREFETCH_ABORT:
2101 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2102 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2103 break;
2104 case EXCP_DATA_ABORT:
2105 env->v7m.cfsr[env->v7m.secure] |=
2106 (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2107 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2108 qemu_log_mask(CPU_LOG_INT,
2109 "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2110 env->v7m.mmfar[env->v7m.secure]);
2111 break;
2112 }
2113 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2114 env->v7m.secure);
2115 break;
2116 }
2117 break;
2118 case EXCP_BKPT:
2119 if (semihosting_enabled()) {
2120 int nr;
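/* 0xab is the BKPT immediate conventionally used for semihosting calls */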
2121 nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
2122 if (nr == 0xab) {
2123 env->regs[15] += 2;
2124 qemu_log_mask(CPU_LOG_INT,
2125 "...handling as semihosting call 0x%x\n",
2126 env->regs[0]);
2127 env->regs[0] = do_arm_semihosting(env);
2128 return;
2129 }
2130 }
2131 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2132 break;
2133 case EXCP_IRQ:
2134 break;
2135 case EXCP_EXCEPTION_EXIT:
2136 if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2137 /* Must be v8M security extension function return */
2138 assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2139 assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2140 if (do_v7m_function_return(cpu)) {
2141 return;
2142 }
2143 } else {
2144 do_v7m_exception_exit(cpu);
2145 return;
2146 }
2147 break;
2148 case EXCP_LAZYFP:
2149 /*
2150 * We already pended the specific exception in the NVIC in the
2151 * v7m_preserve_fp_state() helper function.
2152 */
2153 break;
2154 default:
2155 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2156 return; /* Never happens. Keep compiler happy. */
2157 }
2158
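/*
 * As a reminder of the EXC_RETURN (LR) layout the masks below refer to:
 * bits [31:24] are the RES1 0xff prefix, bit 6 is S, bit 5 DCRS,
 * bit 4 FType, bit 3 Mode, bit 2 SPSEL and bit 0 ES.
 */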
2159 if (arm_feature(env, ARM_FEATURE_V8)) {
2160 lr = R_V7M_EXCRET_RES1_MASK |
2161 R_V7M_EXCRET_DCRS_MASK;
2162 /*
2163 * The S bit indicates whether we should return to Secure
2164 * or NonSecure (ie our current state).
2165 * The ES bit indicates whether we're taking this exception
2166 * to Secure or NonSecure (ie our target state). We set it
2167 * later, in v7m_exception_taken().
2168 * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2169 * This corresponds to the ARM ARM pseudocode for v8M setting
2170 * some LR bits in PushStack() and some in ExceptionTaken();
2171 * the distinction matters for the tailchain cases where we
2172 * can take an exception without pushing the stack.
2173 */
2174 if (env->v7m.secure) {
2175 lr |= R_V7M_EXCRET_S_MASK;
2176 }
2177 if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2178 lr |= R_V7M_EXCRET_FTYPE_MASK;
2179 }
2180 } else {
2181 lr = R_V7M_EXCRET_RES1_MASK |
2182 R_V7M_EXCRET_S_MASK |
2183 R_V7M_EXCRET_DCRS_MASK |
2184 R_V7M_EXCRET_FTYPE_MASK |
2185 R_V7M_EXCRET_ES_MASK;
2186 if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2187 lr |= R_V7M_EXCRET_SPSEL_MASK;
2188 }
2189 }
2190 if (!arm_v7m_is_handler_mode(env)) {
2191 lr |= R_V7M_EXCRET_MODE_MASK;
2192 }
2193
2194 ignore_stackfaults = v7m_push_stack(cpu);
2195 v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2196}
2197
2198uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2199{
2200 uint32_t mask;
2201 unsigned el = arm_current_el(env);
2202
 2203 /* First handle registers which unprivileged code can read */
2204
2205 switch (reg) {
2206 case 0 ... 7: /* xPSR sub-fields */
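/*
 * SYSm values 0..7 select combinations of APSR/IPSR/EPSR: bit 0 set
 * includes the IPSR, bit 1 set includes the EPSR (which reads as zero
 * here), and values below 4 include the APSR. That is what the mask
 * construction below implements.
 */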
2207 mask = 0;
2208 if ((reg & 1) && el) {
2209 mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
2210 }
2211 if (!(reg & 4)) {
2212 mask |= XPSR_NZCV | XPSR_Q; /* APSR */
2213 if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
2214 mask |= XPSR_GE;
2215 }
2216 }
2217 /* EPSR reads as zero */
2218 return xpsr_read(env) & mask;
2219 break;
2220 case 20: /* CONTROL */
2221 {
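/*
 * CONTROL bit layout assumed here: nPRIV is bit 0, SPSEL bit 1,
 * FPCA bit 2 and SFPA bit 3. FPCA and SFPA are kept only in the
 * Secure bank, hence the merging below for Non-secure reads.
 */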
2222 uint32_t value = env->v7m.control[env->v7m.secure];
2223 if (!env->v7m.secure) {
2224 /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
2225 value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
2226 }
2227 return value;
2228 }
2229 case 0x94: /* CONTROL_NS */
2230 /*
2231 * We have to handle this here because unprivileged Secure code
2232 * can read the NS CONTROL register.
2233 */
2234 if (!env->v7m.secure) {
2235 return 0;
2236 }
2237 return env->v7m.control[M_REG_NS] |
2238 (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2239 }
2240
2241 if (el == 0) {
2242 return 0; /* unprivileged reads others as zero */
2243 }
2244
2245 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2246 switch (reg) {
2247 case 0x88: /* MSP_NS */
2248 if (!env->v7m.secure) {
2249 return 0;
2250 }
2251 return env->v7m.other_ss_msp;
2252 case 0x89: /* PSP_NS */
2253 if (!env->v7m.secure) {
2254 return 0;
2255 }
2256 return env->v7m.other_ss_psp;
2257 case 0x8a: /* MSPLIM_NS */
2258 if (!env->v7m.secure) {
2259 return 0;
2260 }
2261 return env->v7m.msplim[M_REG_NS];
2262 case 0x8b: /* PSPLIM_NS */
2263 if (!env->v7m.secure) {
2264 return 0;
2265 }
2266 return env->v7m.psplim[M_REG_NS];
2267 case 0x90: /* PRIMASK_NS */
2268 if (!env->v7m.secure) {
2269 return 0;
2270 }
2271 return env->v7m.primask[M_REG_NS];
2272 case 0x91: /* BASEPRI_NS */
2273 if (!env->v7m.secure) {
2274 return 0;
2275 }
2276 return env->v7m.basepri[M_REG_NS];
2277 case 0x93: /* FAULTMASK_NS */
2278 if (!env->v7m.secure) {
2279 return 0;
2280 }
2281 return env->v7m.faultmask[M_REG_NS];
2282 case 0x98: /* SP_NS */
2283 {
2284 /*
2285 * This gives the non-secure SP selected based on whether we're
2286 * currently in handler mode or not, using the NS CONTROL.SPSEL.
2287 */
2288 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2289
2290 if (!env->v7m.secure) {
2291 return 0;
2292 }
2293 if (!arm_v7m_is_handler_mode(env) && spsel) {
2294 return env->v7m.other_ss_psp;
2295 } else {
2296 return env->v7m.other_ss_msp;
2297 }
2298 }
2299 default:
2300 break;
2301 }
2302 }
2303
2304 switch (reg) {
2305 case 8: /* MSP */
2306 return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2307 case 9: /* PSP */
2308 return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2309 case 10: /* MSPLIM */
2310 if (!arm_feature(env, ARM_FEATURE_V8)) {
2311 goto bad_reg;
2312 }
2313 return env->v7m.msplim[env->v7m.secure];
2314 case 11: /* PSPLIM */
2315 if (!arm_feature(env, ARM_FEATURE_V8)) {
2316 goto bad_reg;
2317 }
2318 return env->v7m.psplim[env->v7m.secure];
2319 case 16: /* PRIMASK */
2320 return env->v7m.primask[env->v7m.secure];
2321 case 17: /* BASEPRI */
2322 case 18: /* BASEPRI_MAX */
2323 return env->v7m.basepri[env->v7m.secure];
2324 case 19: /* FAULTMASK */
2325 return env->v7m.faultmask[env->v7m.secure];
2326 default:
2327 bad_reg:
2328 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2329 " register %d\n", reg);
2330 return 0;
2331 }
2332}
2333
2334void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2335{
2336 /*
2337 * We're passed bits [11..0] of the instruction; extract
2338 * SYSm and the mask bits.
2339 * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2340 * we choose to treat them as if the mask bits were valid.
2341 * NB that the pseudocode 'mask' variable is bits [11..10],
2342 * whereas ours is [11..8].
2343 */
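/*
 * Concretely: the architectural two mask bits end up in bits [3:2] of
 * our 4-bit mask, which is why the APSR write code below tests
 * (mask & 8) for the NZCVQ flags and (mask & 4) for the GE bits.
 */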
2344 uint32_t mask = extract32(maskreg, 8, 4);
2345 uint32_t reg = extract32(maskreg, 0, 8);
2346 int cur_el = arm_current_el(env);
2347
2348 if (cur_el == 0 && reg > 7 && reg != 20) {
2349 /*
2350 * only xPSR sub-fields and CONTROL.SFPA may be written by
2351 * unprivileged code
2352 */
2353 return;
2354 }
2355
2356 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2357 switch (reg) {
2358 case 0x88: /* MSP_NS */
2359 if (!env->v7m.secure) {
2360 return;
2361 }
2362 env->v7m.other_ss_msp = val;
2363 return;
2364 case 0x89: /* PSP_NS */
2365 if (!env->v7m.secure) {
2366 return;
2367 }
2368 env->v7m.other_ss_psp = val;
2369 return;
2370 case 0x8a: /* MSPLIM_NS */
2371 if (!env->v7m.secure) {
2372 return;
2373 }
2374 env->v7m.msplim[M_REG_NS] = val & ~7;
2375 return;
2376 case 0x8b: /* PSPLIM_NS */
2377 if (!env->v7m.secure) {
2378 return;
2379 }
2380 env->v7m.psplim[M_REG_NS] = val & ~7;
2381 return;
2382 case 0x90: /* PRIMASK_NS */
2383 if (!env->v7m.secure) {
2384 return;
2385 }
2386 env->v7m.primask[M_REG_NS] = val & 1;
2387 return;
2388 case 0x91: /* BASEPRI_NS */
2389 if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2390 return;
2391 }
2392 env->v7m.basepri[M_REG_NS] = val & 0xff;
2393 return;
2394 case 0x93: /* FAULTMASK_NS */
2395 if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
2396 return;
2397 }
2398 env->v7m.faultmask[M_REG_NS] = val & 1;
2399 return;
2400 case 0x94: /* CONTROL_NS */
2401 if (!env->v7m.secure) {
2402 return;
2403 }
2404 write_v7m_control_spsel_for_secstate(env,
2405 val & R_V7M_CONTROL_SPSEL_MASK,
2406 M_REG_NS);
2407 if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2408 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2409 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2410 }
2411 /*
2412 * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2413 * RES0 if the FPU is not present, and is stored in the S bank
2414 */
2415 if (arm_feature(env, ARM_FEATURE_VFP) &&
2416 extract32(env->v7m.nsacr, 10, 1)) {
2417 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2418 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2419 }
2420 return;
2421 case 0x98: /* SP_NS */
2422 {
2423 /*
2424 * This gives the non-secure SP selected based on whether we're
2425 * currently in handler mode or not, using the NS CONTROL.SPSEL.
2426 */
2427 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2428 bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2429 uint32_t limit;
2430
2431 if (!env->v7m.secure) {
2432 return;
2433 }
2434
2435 limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false];
2436
2437 if (val < limit) {
2438 CPUState *cs = env_cpu(env);
2439
2440 cpu_restore_state(cs, GETPC(), true);
2441 raise_exception(env, EXCP_STKOF, 0, 1);
2442 }
2443
2444 if (is_psp) {
2445 env->v7m.other_ss_psp = val;
2446 } else {
2447 env->v7m.other_ss_msp = val;
2448 }
2449 return;
2450 }
2451 default:
2452 break;
2453 }
2454 }
2455
2456 switch (reg) {
2457 case 0 ... 7: /* xPSR sub-fields */
2458 /* only APSR is actually writable */
2459 if (!(reg & 4)) {
2460 uint32_t apsrmask = 0;
2461
2462 if (mask & 8) {
2463 apsrmask |= XPSR_NZCV | XPSR_Q;
2464 }
2465 if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
2466 apsrmask |= XPSR_GE;
2467 }
2468 xpsr_write(env, val, apsrmask);
2469 }
2470 break;
2471 case 8: /* MSP */
2472 if (v7m_using_psp(env)) {
2473 env->v7m.other_sp = val;
2474 } else {
2475 env->regs[13] = val;
2476 }
2477 break;
2478 case 9: /* PSP */
2479 if (v7m_using_psp(env)) {
2480 env->regs[13] = val;
2481 } else {
2482 env->v7m.other_sp = val;
2483 }
2484 break;
2485 case 10: /* MSPLIM */
2486 if (!arm_feature(env, ARM_FEATURE_V8)) {
2487 goto bad_reg;
2488 }
2489 env->v7m.msplim[env->v7m.secure] = val & ~7;
2490 break;
2491 case 11: /* PSPLIM */
2492 if (!arm_feature(env, ARM_FEATURE_V8)) {
2493 goto bad_reg;
2494 }
2495 env->v7m.psplim[env->v7m.secure] = val & ~7;
2496 break;
2497 case 16: /* PRIMASK */
2498 env->v7m.primask[env->v7m.secure] = val & 1;
2499 break;
2500 case 17: /* BASEPRI */
2501 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2502 goto bad_reg;
2503 }
2504 env->v7m.basepri[env->v7m.secure] = val & 0xff;
2505 break;
2506 case 18: /* BASEPRI_MAX */
2507 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2508 goto bad_reg;
2509 }
2510 val &= 0xff;
2511 if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2512 || env->v7m.basepri[env->v7m.secure] == 0)) {
2513 env->v7m.basepri[env->v7m.secure] = val;
2514 }
2515 break;
2516 case 19: /* FAULTMASK */
2517 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2518 goto bad_reg;
2519 }
2520 env->v7m.faultmask[env->v7m.secure] = val & 1;
2521 break;
2522 case 20: /* CONTROL */
2523 /*
2524 * Writing to the SPSEL bit only has an effect if we are in
2525 * thread mode; other bits can be updated by any privileged code.
2526 * write_v7m_control_spsel() deals with updating the SPSEL bit in
2527 * env->v7m.control, so we only need update the others.
2528 * For v7M, we must just ignore explicit writes to SPSEL in handler
2529 * mode; for v8M the write is permitted but will have no effect.
2530 * All these bits are writes-ignored from non-privileged code,
2531 * except for SFPA.
2532 */
2533 if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
2534 !arm_v7m_is_handler_mode(env))) {
2535 write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
2536 }
2537 if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
2538 env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
2539 env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
2540 }
2541 if (arm_feature(env, ARM_FEATURE_VFP)) {
2542 /*
2543 * SFPA is RAZ/WI from NS or if no FPU.
2544 * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
2545 * Both are stored in the S bank.
2546 */
2547 if (env->v7m.secure) {
2548 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2549 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
2550 }
2551 if (cur_el > 0 &&
2552 (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
2553 extract32(env->v7m.nsacr, 10, 1))) {
2554 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2555 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2556 }
2557 }
2558 break;
2559 default:
2560 bad_reg:
2561 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
2562 " register %d\n", reg);
2563 return;
2564 }
2565}
2566
2567uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
2568{
2569 /* Implement the TT instruction. op is bits [7:6] of the insn. */
2570 bool forceunpriv = op & 1;
2571 bool alt = op & 2;
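/*
 * Assuming the usual suffix naming, the four op encodings correspond to
 * TT (0), TTT (1), TTA (2) and TTAT (3): the T suffix forces the
 * unprivileged check and the A suffix inspects the other ("alt")
 * security state.
 */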
2572 V8M_SAttributes sattrs = {};
2573 uint32_t tt_resp;
2574 bool r, rw, nsr, nsrw, mrvalid;
2575 int prot;
2576 ARMMMUFaultInfo fi = {};
2577 MemTxAttrs attrs = {};
2578 hwaddr phys_addr;
2579 ARMMMUIdx mmu_idx;
2580 uint32_t mregion;
2581 bool targetpriv;
2582 bool targetsec = env->v7m.secure;
2583 bool is_subpage;
2584
 2585 /*
 2586 * Work out which security state and privilege level we're
 2587 * interested in...
 2588 */
2589 if (alt) {
2590 targetsec = !targetsec;
2591 }
2592
2593 if (forceunpriv) {
2594 targetpriv = false;
2595 } else {
2596 targetpriv = arm_v7m_is_handler_mode(env) ||
2597 !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
2598 }
2599
2600 /* ...and then figure out which MMU index this is */
2601 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
2602
2603 /*
 2604 * For our purposes the MPU and SAU don't care about the access type,
 2605 * beyond the fact that we don't want to claim to be an insn fetch,
 2606 * so we arbitrarily call this a read.
2607 */
2608
2609 /*
2610 * MPU region info only available for privileged or if
2611 * inspecting the other MPU state.
2612 */
2613 if (arm_current_el(env) != 0 || alt) {
2614 /* We can ignore the return value as prot is always set */
2615 pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
2616 &phys_addr, &attrs, &prot, &is_subpage,
2617 &fi, &mregion);
2618 if (mregion == -1) {
2619 mrvalid = false;
2620 mregion = 0;
2621 } else {
2622 mrvalid = true;
2623 }
2624 r = prot & PAGE_READ;
2625 rw = prot & PAGE_WRITE;
2626 } else {
2627 r = false;
2628 rw = false;
2629 mrvalid = false;
2630 mregion = 0;
2631 }
2632
2633 if (env->v7m.secure) {
2634 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
2635 nsr = sattrs.ns && r;
2636 nsrw = sattrs.ns && rw;
2637 } else {
2638 sattrs.ns = true;
2639 nsr = false;
2640 nsrw = false;
2641 }
2642
2643 tt_resp = (sattrs.iregion << 24) |
2644 (sattrs.irvalid << 23) |
2645 ((!sattrs.ns) << 22) |
2646 (nsrw << 21) |
2647 (nsr << 20) |
2648 (rw << 19) |
2649 (r << 18) |
2650 (sattrs.srvalid << 17) |
2651 (mrvalid << 16) |
2652 (sattrs.sregion << 8) |
2653 mregion;
2654
2655 return tt_resp;
2656}
2657
2658#endif /* !CONFIG_USER_ONLY */
2659
2660ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
2661 bool secstate, bool priv, bool negpri)
2662{
2663 ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
2664
2665 if (priv) {
2666 mmu_idx |= ARM_MMU_IDX_M_PRIV;
2667 }
2668
2669 if (negpri) {
2670 mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
2671 }
2672
2673 if (secstate) {
2674 mmu_idx |= ARM_MMU_IDX_M_S;
2675 }
2676
2677 return mmu_idx;
2678}
2679
2680ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
2681 bool secstate, bool priv)
2682{
2683 bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
2684
2685 return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
2686}
2687
2688/* Return the MMU index for a v7M CPU in the specified security state */
2689ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
2690{
2691 bool priv = arm_current_el(env) != 0;
2692
2693 return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
2694}
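
#if 0
/*
 * Illustrative sketch only: one way a caller that already knows the
 * target security state and privilege level could obtain an MMU index
 * from the helpers above. The function name is arbitrary and the
 * Secure/unprivileged choice is just an example.
 */
static ARMMMUIdx example_mmu_idx_secure_unpriv(CPUARMState *env)
{
    return arm_v7m_mmu_idx_for_secstate_and_priv(env, true, false);
}
#endif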