/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

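/* Table lookup for the Neon VTBL/VTBX family: each byte of ireg is used as
 * an index into the table of bytes held in the Neon registers starting at
 * register rn (maxindex is the table size in bytes). Out-of-range indices
 * take the corresponding byte from def instead (the translator is expected
 * to pass zero for VTBL and the old destination value for VTBX).
 */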
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                          uint32_t fsr, uint32_t fsc, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc;

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (fsc == 0x3f) {
        /* Caller doesn't have a long-format fault status code. This
         * should only happen if this fault will never actually be reported
         * to an EL that uses a syndrome register. Check that here.
         * 0x3f is a (currently) reserved FSC code, in case the constructed
         * syndrome does leak into the guest somehow.
         */
        assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Try to fill the TLB and raise an exception on error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c).
 */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        uint32_t fsc;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        if (fsr & (1 << 9)) {
            /* LPAE format fault status register : bottom 6 bits are
             * status code in the same form as needed for syndrome
             */
            fsc = extract32(fsr, 0, 6);
        } else {
            /* Short format FSR : this fault will never actually be reported
             * to an EL that uses a syndrome register. Use a (currently)
             * reserved FSR code in case the constructed syndrome does leak
             * into the guest somehow. deliver_fault will assert that
             * we don't target an EL using the syndrome.
             */
            fsc = 0x3f;
        }

        deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
    }
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t fsr, fsc;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    /* the DFSR for an alignment fault depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format
     */
    if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        fsr = (1 << 9) | 0x21;
    } else {
        fsr = 0x1;
    }
    fsc = 0x21;

    deliver_fault(cpu, vaddr, access_type, fsr, fsc, &fi);
}

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t fsr, fsc;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    fi.ea = (response != MEMTX_DECODE_ERROR);

    /* The fault status register format depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format.
     */
    if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* long descriptor form, STATUS 0b010000: synchronous ext abort */
        fsr = (fi.ea << 12) | (1 << 9) | 0x10;
    } else {
        /* short descriptor form, FSR 0b01000 : synchronous ext abort */
        fsr = (fi.ea << 12) | 0x8;
    }
    fsc = 0x10;

    deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */

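/* The following saturating arithmetic helpers all record overflow in the
 * sticky Q flag (env->QF). add_setq() performs a plain addition and only
 * sets Q on signed overflow, whereas the *_saturate helpers also clamp the
 * result to the representable range.
 */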
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
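/* Worked examples: do_ssat clamps to [-2^shift, 2^shift - 1] and do_usat
 * clamps to [0, 2^shift - 1], setting QF whenever clamping happens:
 *   do_ssat(env, 300, 7)  -> 127  (0x7f) and sets QF
 *   do_ssat(env, -300, 7) -> -128 (~0x7f) and sets QF
 *   do_usat(env, -5, 8)   -> 0   and sets QF
 *   do_usat(env, 300, 8)  -> 255 and sets QF
 */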

/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are handled at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist the
     * bits will be zero, indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
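/* Example of the logic above: with EL2 implemented and HCR_EL2.TWI set, a
 * WFI executed at non-secure EL1 skips the SCTLR check (which only applies
 * to EL0), matches the HCR_EL2 check and returns 2, so the caller raises a
 * trap to EL2.
 */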

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

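/* access_check_cp_reg is invoked from generated code before a system
 * register access: it applies the XScale CPAR check and then runs the
 * register's accessfn (if any), converting the CP_ACCESS_* result into an
 * exception routed to the appropriate target EL.
 */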
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

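/* The cp register read/write helpers below take the iothread (BQL) lock
 * around registers marked ARM_CP_IO, presumably because their readfn/writefn
 * may touch device, timer or other global state that is not safe to access
 * without the lock under multi-threaded TCG.
 */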
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* If PSCI is enabled and this looks like a valid PSCI call then
     * suppress the UNDEF -- we'll catch the SMC exception and
     * implement the PSCI call behaviour there.
     */
    if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}
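/* For example, an SPSR value of 0x3c5 (nRW clear, M[3:0] = 0b0101, i.e.
 * AArch64 EL1h) makes el_from_spsr() return 1, while any value with the
 * reserved M[1] bit set yields -1 and is treated as an illegal return.
 */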

void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    arm_clear_exclusive(env);

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     * 1. debug exceptions are currently disabled
     * 2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
                      cur_el, new_el, env->regs[15]);
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
                      cur_el, new_el, env->pc);
    }

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal(env, EXCP_DEBUG));
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }
        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

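/* Each *_cc helper below returns the shifted value and leaves the shifter
 * carry-out in env->CF. A shift amount of zero returns x with CF untouched
 * (except ROR by a non-zero multiple of 32, which copies bit 31 into CF).
 * For example, shl_cc(env, 0x80000001, 1) returns 0x00000002 with CF = 1.
 */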
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}