/*
 *  AArch64 specific helpers
 *
 *  Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "internals.h"
/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

int64_t HELPER(sdiv64)(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == LLONG_MIN && den == -1) {
        return LLONG_MIN;
    }
    return num / den;
}
uint64_t HELPER(clz64)(uint64_t x)
{
    return clz64(x);
}

uint64_t HELPER(cls64)(uint64_t x)
{
    return clrsb64(x);
}

uint32_t HELPER(cls32)(uint32_t x)
{
    return clrsb32(x);
}

uint32_t HELPER(clz32)(uint32_t x)
{
    return clz32(x);
}
uint64_t HELPER(rbit64)(uint64_t x)
{
    /* assign the correct byte position */
    x = bswap64(x);

    /* assign the correct nibble position */
    x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4);

    /* assign the correct bit position */
    x = ((x & 0x8888888888888888ULL) >> 3)
      | ((x & 0x4444444444444444ULL) >> 1)
      | ((x & 0x2222222222222222ULL) << 1)
      | ((x & 0x1111111111111111ULL) << 3);

    return x;
}
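
/* For example, rbit64(0x0000000000000001ULL): bswap64 gives
 * 0x0100000000000000, the nibble swap 0x1000000000000000, and the
 * final bit swap moves bit 60 to bit 63, yielding
 * 0x8000000000000000ULL.
 */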

/* Convert a softfloat float_relation_ (as returned by
 * the float*_compare functions) to the correct ARM
 * NZCV flag state.
 */
static inline uint32_t float_rel_to_flags(int res)
{
    uint64_t flags;
    switch (res) {
    case float_relation_equal:
        flags = PSTATE_Z | PSTATE_C;
        break;
    case float_relation_less:
        flags = PSTATE_N;
        break;
    case float_relation_greater:
        flags = PSTATE_C;
        break;
    case float_relation_unordered:
    default:
        flags = PSTATE_C | PSTATE_V;
        break;
    }
    return flags;
}
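
/* For example, comparing 1.0 with a NaN returns
 * float_relation_unordered, which maps to N=0 Z=0 C=1 V=1, the
 * architected FCMP result for unordered operands.
 */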

uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare(x, y, fp_status));
}

float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}

float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    if ((float64_is_zero(a) && float64_is_infinity(b)) ||
        (float64_is_infinity(a) && float64_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float64((1ULL << 62) |
                            ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
    }
    return float64_mul(a, b, fpst);
}
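
/* FMULX differs from a plain multiply only in the 0 * infinity case:
 * instead of the default NaN it returns 2.0 with the XORed sign,
 * e.g. mulx(+0.0, -inf) = -2.0. Bit 30 alone is the IEEE single
 * encoding of 2.0 (0x40000000), bit 62 alone the double encoding.
 */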

uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
                          uint32_t rn, uint32_t numregs)
{
    /* Helper function for SIMD TBL and TBX. We have to do the table
     * lookup part for the 64 bits worth of indices we're passed in.
     * result is the initial results vector (either zeroes for TBL
     * or some guest values for TBX), rn the register number where
     * the table starts, and numregs the number of registers in the table.
     * We return the results of the lookups.
     */
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int index = extract64(indices, shift, 8);
        if (index < 16 * numregs) {
            /* Convert index (a byte offset into the virtual table
             * which is a series of 128-bit vectors concatenated)
             * into the correct vfp.regs[] element plus a bit offset
             * into that element, bearing in mind that the table
             * can wrap around from V31 to V0.
             */
            int elt = (rn * 2 + (index >> 3)) % 64;
            int bitidx = (index & 7) * 8;
            uint64_t val = extract64(env->vfp.regs[elt], bitidx, 8);

            result = deposit64(result, shift, 8, val);
        }
    }
    return result;
}
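
/* For example, with numregs = 2 and rn = 31 the table wraps: index 17
 * is byte 1 of the second table register, i.e. V0, giving
 * elt = (31 * 2 + (17 >> 3)) % 64 = 0 and bitidx = 8. Any index of
 * 32 or more is out of range and leaves the result byte alone
 * (zero for TBL, the old destination value for TBX).
 */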

/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_eq_quiet(a, b, fpst);
}

uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_le(b, a, fpst);
}

uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_lt(b, a, fpst);
}

/* Reciprocal step and sqrt step. Note that unlike the A32/T32
 * versions, these do a fully fused multiply-add or
 * multiply-add-and-halve.
 */
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

#define float64_two make_float64(0x4000000000000000ULL)
#define float64_three make_float64(0x4008000000000000ULL)
#define float64_one_point_five make_float64(0x3FF8000000000000ULL)

float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_two;
    }
    return float32_muladd(a, b, float32_two, 0, fpst);
}

float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_two;
    }
    return float64_muladd(a, b, float64_two, 0, fpst);
}
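
/* Given the negation of a above, the fused muladd computes
 * 2 - a * b in a single rounding: the Newton-Raphson step for 1/x,
 * where an estimate e is refined as e' = e * (2 - x * e).
 */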

float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_one_point_five;
    }
    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
}

float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_one_point_five;
    }
    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
}
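
/* Similarly, the halved fused muladd computes (3 - a * b) / 2, the
 * Newton-Raphson step for 1/sqrt(x): e' = e * (3 - x * e * e) / 2.
 */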

/* Pairwise long add: add pairs of adjacent elements into
 * double-width elements in the result (eg _s8 is an 8x8->16 op)
 */
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2;
    uint64_t res, signres;

    /* Extract odd elements, sign extend each to a 16 bit field */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the even elements */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* calculate the result by summing bits 0..14, 16..22, etc,
     * and then adjusting the sign bits 15, 23, etc manually.
     * This ensures the addition can't overflow the 16 bit field.
     */
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
    res ^= signres;

    return res;
}
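
/* The mask dance above is a SWAR sign-extension trick: within each
 * 16-bit lane it computes (v ^ 0x80) - 0x80, i.e. sign extends the
 * byte, with the OR/XOR of wsignmask keeping the subtraction from
 * borrowing across lane boundaries. For example, lane 0 holding
 * bytes {0x01, 0xff} sums as 1 + (-1) = 0x0000.
 */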

uint64_t HELPER(neon_addlp_u8)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x00ff00ff00ff00ffULL;
    tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
    return tmp;
}

uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
    int32_t reslo, reshi;

    reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
    reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);

    return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}

uint64_t HELPER(neon_addlp_u16)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x0000ffff0000ffffULL;
    tmp += (a >> 16) & 0x0000ffff0000ffffULL;
    return tmp;
}

/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint32_t val32, sbit;
    int32_t exp;

    if (float32_is_any_nan(a)) {
        float32 nan = a;
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan;
        }
        return nan;
    }

    val32 = float32_val(a);
    sbit = 0x80000000ULL & val32;
    exp = extract32(val32, 23, 8);

    if (exp == 0) {
        return make_float32(sbit | (0xfe << 23));
    } else {
        return make_float32(sbit | (~exp & 0xff) << 23);
    }
}
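
/* FRECPX merely inverts the exponent bits and zeroes the fraction:
 * e.g. frecpx(1.0) = 2.0 (exponent 0x7f -> 0x80) and
 * frecpx(4.0) = 0.5 (0x81 -> 0x7e), a crude exponent-only
 * reciprocal estimate.
 */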

float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint64_t val64, sbit;
    int64_t exp;

    if (float64_is_any_nan(a)) {
        float64 nan = a;
        if (float64_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan;
        }
        return nan;
    }

    val64 = float64_val(a);
    sbit = 0x8000000000000000ULL & val64;
    exp = extract64(float64_val(a), 52, 11);

    if (exp == 0) {
        return make_float64(sbit | (0x7feULL << 52));
    } else {
        return make_float64(sbit | (~exp & 0x7ffULL) << 52);
    }
}

float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
{
    /* Von Neumann rounding is implemented by using round-to-zero
     * and then setting the LSB of the result if Inexact was raised.
     */
    float32 r;
    float_status *fpst = &env->vfp.fp_status;
    float_status tstat = *fpst;
    int exflags;

    set_float_rounding_mode(float_round_to_zero, &tstat);
    set_float_exception_flags(0, &tstat);
    r = float64_to_float32(a, &tstat);
    r = float32_maybe_silence_nan(r);
    exflags = get_float_exception_flags(&tstat);
    if (exflags & float_flag_inexact) {
        r = make_float32(float32_val(r) | 1);
    }
    exflags |= get_float_exception_flags(fpst);
    set_float_exception_flags(exflags, fpst);
    return r;
}
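
/* For example, converting float64 1.0 + 2^-40 truncates to 1.0f and
 * raises Inexact, so the low bit is jammed and the result is
 * 0x3f800001. Because every inexact result ends up odd, a later
 * narrowing to half precision cannot round the same value twice in
 * the same direction (the double-rounding hazard FCVTXN avoids).
 */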

/* Handle a CPU exception.  */
void aarch64_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    target_ulong addr = env->cp15.vbar_el[1];
    int i;

    if (arm_current_pl(env) == 0) {
        if (is_a64(env)) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }
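
    /* addr now selects the vector table quadrant: +0x0 for the
     * current EL using SP_EL0, +0x200 for the current EL using
     * SP_ELx, +0x400 for a lower EL in AArch64 and +0x600 for a
     * lower EL in AArch32. The switch below adds the per-type offset
     * (+0x80 for IRQ, +0x100 for FIQ); synchronous exceptions use
     * the base of the quadrant.
     */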

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_pl(env));
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n",
                      env->exception.syndrome);
    }

    env->cp15.esr_el[1] = env->exception.syndrome;
    env->cp15.far_el1 = env->exception.vaddress;

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el1);
        break;
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
        break;
    case EXCP_IRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
        addr += 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[aarch64_banked_spsr_index(1)] = pstate_read(env);
        env->sp_el[arm_current_pl(env)] = env->xregs[31];
        env->xregs[31] = env->sp_el[1];
        env->elr_el[1] = env->pc;
    } else {
        env->banked_spsr[0] = cpsr_read(env);
        /* ESR.IL (bit 25) is 1 for 32-bit A32 instructions,
         * 0 for 16-bit T32 instructions.
         */
        if (!env->thumb) {
            env->cp15.esr_el[1] |= 1 << 25;
        }
        env->elr_el[1] = env->regs[15];

        for (i = 0; i < 15; i++) {
            env->xregs[i] = env->regs[i];
        }

        env->condexec_bits = 0;
    }

    pstate_write(env, PSTATE_DAIF | PSTATE_MODE_EL1h);
    env->aarch64 = 1;

    env->pc = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}