/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

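/* Set the pending exception on the CPU and longjmp back out of the
 * generated code to the main execution loop.
 */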
static void raise_exception(CPUARMState *env, int tt)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = tt;
    cpu_loop_exit(cs);
}

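/* Table lookup for the NEON VTBL/VTBX instructions: each byte of the
 * index word ireg selects a byte from the table of NEON registers
 * starting at rn; indices of maxindex or above instead take the
 * corresponding byte of def (the fallback value supplied by the
 * translator).
 */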
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        raise_exception(env, cs->exception_index);
    }
}
#endif

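/* Helpers for the ARM saturating arithmetic instructions (QADD and
 * friends). Each helper sets env->QF, the sticky CPSR.Q flag, on
 * overflow; add_setq only flags the overflow, while the *_saturate
 * helpers also clamp the result.
 */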
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

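/* WFI: halt the CPU and exit to the main loop, leaving it halted
 * until an interrupt arrives.
 */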
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    /* Don't actually halt the CPU, just yield back to top
     * level loop
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    cpu_loop_exit(cs);
}

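/* Read the CPSR for the guest, with the execution state bits and the
 * reserved bits masked out.
 */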
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}

/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

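/* Runtime access checks for a coprocessor/system register: rip is the
 * ARMCPRegInfo for the register, and we raise an UNDEF exception with
 * the supplied syndrome if the access is denied.
 */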
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
    const ARMCPRegInfo *ri = rip;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        env->exception.syndrome = syndrome;
        raise_exception(env, EXCP_UDEF);
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        env->exception.syndrome = syndrome;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        env->exception.syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }
    raise_exception(env, EXCP_UDEF);
}

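/* Wrappers which invoke a register's writefn/readfn hook; these are
 * used when an access has to go through a C callback rather than a
 * simple field load or store.
 */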
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        raise_exception(env, EXCP_UDEF);
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

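/* Runtime checks for the HVC instruction: a valid-looking PSCI call is
 * left to the PSCI emulation; otherwise HVC UNDEFs if EL2 is disabled
 * or unavailable, or when executed in secure state where forbidden.
 */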
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
    }
}

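/* Runtime checks for the SMC instruction: a valid-looking PSCI call is
 * left to the PSCI emulation; otherwise SMC may be trapped to EL2 via
 * HCR_EL2.TSC, or UNDEF when EL3 is absent or SCR_EL3.SMD disables it.
 */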
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 AArch32, SMD only applies to NS state.
     * On ARMv7 SMD only applies to NS state and only if EL2 is available.
     * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
     * the EL2 condition here.
     */
    bool undef = is_a64(env) ? smd : (!secure && smd);

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        env->exception.syndrome = syndrome;
        raise_exception(env, EXCP_HYP_TRAP);
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
    }
}

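/* Implement the AArch64 ERET instruction: restore PSTATE/CPSR from the
 * SPSR of the current exception level, the PC from the matching ELR,
 * and apply the architectural illegal-return checks.
 */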
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    if (spsr & PSTATE_nRW) {
        /* TODO: We currently assume EL1/2/3 are running in AArch64. */
        env->aarch64 = 0;
        new_el = 0;
        env->uncached_cpsr = 0x10;
        cpsr_write(env, spsr, ~0);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        env->regs[15] = env->elr_el[1] & ~0x1;
    } else {
        new_el = extract32(spsr, 2, 2);
        if (new_el > cur_el
            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
            /* Disallow return to an EL which is unimplemented or higher
             * than the current one.
             */
            goto illegal_return;
        }
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            goto illegal_return;
        }
        if (new_el == 0 && (spsr & PSTATE_SP)) {
            /* Return to EL0 with M[0] bit set */
            goto illegal_return;
        }
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

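/* Return true if breakpoint/watchpoint n fires: the hit has already
 * been flagged by core code (or the breakpoint address matches the PC),
 * and the DBGBCR/DBGWCR fields (security state, privilege, linking)
 * permit the debug event.
 */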
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_watchpoints(cpu)) {
                bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
                bool same_el = arm_debug_target_el(env) == arm_current_el(env);

                env->exception.syndrome = syn_watchpoint(same_el, 0, wnr);
                if (extended_addresses_enabled(env)) {
                    env->exception.fsr = (1 << 9) | 0x22;
                } else {
                    env->exception.fsr = 0x2;
                }
                env->exception.vaddress = wp_hit->hitaddr;
                raise_exception(env, EXCP_DATA_ABORT);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        if (check_breakpoints(cpu)) {
            bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
            env->exception.syndrome = syn_breakpoint(same_el);
            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            /* FAR is UNKNOWN, so doesn't need setting */
            raise_exception(env, EXCP_PREFETCH_ABORT);
        }
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

/* Similarly for variable shift instructions. */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}