/*
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
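/* Sign-bit masks used by the saturating arithmetic helpers below to detect
   signed overflow in the 32-bit and 64-bit add/subtract cases.  */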
void raise_exception(int tt)
    env->exception_index = tt;

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&global_cpu_lock);

    spin_unlock(&global_cpu_lock);
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
    table = (uint64_t *)&env->vfp.regs[rn];
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            /* Shift by bits, not by the byte number, to extract byte
               (index & 7) from the 64-bit table word.  */
            tmp = (table[index >> 3] >> ((index & 7) * 8)) & 0xff;
            val |= def & (0xff << shift);
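    /* Table lookup (NEON VTBL/VTBX): each byte of ireg is an index into the
       table formed by the d registers starting at vfp.regs[rn].  Indexes below
       maxindex select the corresponding table byte; for larger indexes the
       result byte is taken from def instead.  */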
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
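/* Each inclusion of softmmu_template.h instantiates the slow-path load/store
   helpers for one access size (SHIFT 0..3 selects 1-, 2-, 4- and 8-byte
   accesses).  On a TLB miss these helpers end up in tlb_fill() below.  */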
/* try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    env = cpu_single_env;
    ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (__builtin_expect(ret, 0)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
                /* the PC is inside the translated code.  It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
        raise_exception(env->exception_index);
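/* When retaddr points into translated code, the TranslationBlock containing
   it is looked up and cpu_restore_state() resynchronizes the guest CPU state
   for that point before the MMU exception is raised.  */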
/* FIXME: Pass an explicit pointer to QF to CPUState, and move saturating
   instructions into helper.c  */
uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
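    /* The test above detects signed overflow: the operands have the same sign
       but the result's sign differs, e.g. 0x7fffffff + 1 = 0x80000000, which
       sets the sticky Q flag.  */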
uint32_t HELPER(add_saturate)(uint32_t a, uint32_t b)
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
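        /* Saturate toward the sign of the first operand: the expression above
           yields 0x7fffffff when a is non-negative and 0x80000000 when a is
           negative.  */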
uint32_t HELPER(sub_saturate)(uint32_t a, uint32_t b)
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);

uint32_t HELPER(double_saturate)(int32_t val)
    if (val >= 0x40000000) {
    } else if (val <= (int32_t)0xc0000000) {

uint32_t HELPER(add_usaturate)(uint32_t a, uint32_t b)
    uint32_t res = a + b;

uint32_t HELPER(sub_usaturate)(uint32_t a, uint32_t b)
    uint32_t res = a - b;
/* Signed saturation.  */
static inline uint32_t do_ssat(int32_t val, int shift)
    mask = (1u << shift) - 1;
    } else if (top < -1) {
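    /* Signed saturation to n bits: after the arithmetic shift that discards
       the low value bits, "top" holds the bits that must all equal the sign
       bit for the value to be representable.  top == 0 or top == -1 means in
       range; anything larger saturates to mask (the maximum positive value),
       anything smaller to ~mask (the minimum negative value), setting Q in
       both cases.  */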
/* Unsigned saturation.  */
static inline uint32_t do_usat(int32_t val, int shift)
    max = (1u << shift) - 1;
    } else if (val > max) {

/* Signed saturate.  */
uint32_t HELPER(ssat)(uint32_t x, uint32_t shift)
    return do_ssat(x, shift);

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(uint32_t x, uint32_t shift)
    res = (uint16_t)do_ssat((int16_t)x, shift);
    res |= do_ssat(((int32_t)x) >> 16, shift) << 16;

/* Unsigned saturate.  */
uint32_t HELPER(usat)(uint32_t x, uint32_t shift)
    return do_usat(x, shift);

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(uint32_t x, uint32_t shift)
    res = (uint16_t)do_usat((int16_t)x, shift);
    res |= do_usat(((int32_t)x) >> 16, shift) << 16;
void HELPER(wfi)(void)
    env->exception_index = EXCP_HLT;

void HELPER(exception)(uint32_t excp)
    env->exception_index = excp;

uint32_t HELPER(cpsr_read)(void)
    return cpsr_read(env) & ~CPSR_EXEC;

void HELPER(cpsr_write)(uint32_t val, uint32_t mask)
    cpsr_write(env, val, mask);
/* Access to user mode registers from privileged modes.  */
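/* The user-mode SP (r13) and LR (r14) live in bank 0 of the banked register
   arrays.  r8-r12 are only banked in FIQ mode, in which case the user-mode
   copies are held in usr_regs[]; in every other mode they are the live
   registers, as are r0-r7 and r15.  */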
uint32_t HELPER(get_user_reg)(uint32_t regno)
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
        val = env->regs[regno];

void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
        env->regs[regno] = val;
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */
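/* The CPSR flags are kept in lazy form: bit 31 of NF is the N flag, the Z
   flag is set exactly when ZF == 0, CF holds the carry as 0 or 1, and bit 31
   of VF is the V (overflow) flag.  */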
uint32_t HELPER(add_cc)(uint32_t a, uint32_t b)
    env->NF = env->ZF = result;
    env->CF = result < a;
    env->VF = (a ^ b ^ -1) & (a ^ result);

uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
        env->CF = result < a;
        env->CF = result <= a;
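        /* With carry-in 0 the addition wrapped iff result < a; with carry-in 1
           the sum is a + b + 1, so carry-out must also be set when the result
           equals a (that can only happen when b == 0xffffffff, i.e. the sum
           wrapped), hence the <= comparison.  */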
    env->VF = (a ^ b ^ -1) & (a ^ result);
    env->NF = env->ZF = result;

uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
    env->NF = env->ZF = result;
    env->VF = (a ^ b) & (a ^ result);

uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
    env->VF = (a ^ b) & (a ^ result);
    env->NF = env->ZF = result;

/* Similarly for variable shift instructions.  */
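/* ARM register-controlled shifts take the shift amount from the bottom byte
   of the shift register, and amounts of 32 or more have architecturally
   defined results that a plain C shift expression would not give (shifting a
   32-bit value by 32 or more is undefined behaviour in C), hence the explicit
   range checks in the helpers below.  */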
uint32_t HELPER(shl)(uint32_t x, uint32_t i)
    int shift = i & 0xff;

uint32_t HELPER(shr)(uint32_t x, uint32_t i)
    int shift = i & 0xff;
    return (uint32_t)x >> shift;

uint32_t HELPER(sar)(uint32_t x, uint32_t i)
    int shift = i & 0xff;
    return (int32_t)x >> shift;

uint32_t HELPER(ror)(uint32_t x, uint32_t i)
    int shift = i & 0xff;
    return (x >> shift) | (x << (32 - shift));
uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
    int shift = i & 0xff;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;

uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
    int shift = i & 0xff;
            env->CF = (x >> 31) & 1;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;

uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
    int shift = i & 0xff;
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;

uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
    shift = shift1 & 0x1f;
            env->CF = (x >> 31) & 1;
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
uint64_t HELPER(neon_add_saturate_s64)(uint64_t src1, uint64_t src2)
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;

uint64_t HELPER(neon_add_saturate_u64)(uint64_t src1, uint64_t src2)

uint64_t HELPER(neon_sub_saturate_s64)(uint64_t src1, uint64_t src2)
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;

uint64_t HELPER(neon_sub_saturate_u64)(uint64_t src1, uint64_t src2)

/* These need to return a pair of values, so still use T0/T1.  */
/* Transpose.  Argument order is rather strange to avoid special casing
   the translation code.
   On input T0 = rm, T1 = rd.  On output T0 = rd, T1 = rm.  */
void HELPER(neon_trn_u8)(void)
    rd = ((T0 & 0x00ff00ff) << 8) | (T1 & 0x00ff00ff);
    rm = ((T1 & 0xff00ff00) >> 8) | (T0 & 0xff00ff00);
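    /* Byte-wise 2x2 transpose: with input bytes T0 = {m3,m2,m1,m0} (rm) and
       T1 = {d3,d2,d1,d0} (rd), the outputs are rd = {m2,d2,m0,d0} and
       rm = {m3,d3,m1,d1}, as VTRN.8 requires.  */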
void HELPER(neon_trn_u16)(void)
    rd = (T0 << 16) | (T1 & 0xffff);
    rm = (T1 >> 16) | (T0 & 0xffff0000);

/* Worker routines for zip and unzip.  */
void HELPER(neon_unzip_u8)(void)
    rd = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
         | ((T1 << 16) & 0xff0000) | ((T1 << 8) & 0xff000000);
    rm = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
         | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);

void HELPER(neon_zip_u8)(void)
    rd = (T0 & 0xff) | ((T1 << 8) & 0xff00)
         | ((T0 << 16) & 0xff0000) | ((T1 << 24) & 0xff000000);
    rm = ((T0 >> 16) & 0xff) | ((T1 >> 8) & 0xff00)
         | ((T0 >> 8) & 0xff0000) | (T1 & 0xff000000);
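    /* VZIP.8 interleaves the two inputs: for T0 = {a3,a2,a1,a0} and
       T1 = {b3,b2,b1,b0} this produces rd = {b1,a1,b0,a0} and
       rm = {b3,a3,b2,a2}.  neon_unzip_u8 above is the inverse, gathering the
       even-numbered bytes into rd and the odd-numbered bytes into rm.  */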
void HELPER(neon_zip_u16)(void)
    tmp = (T0 & 0xffff) | (T1 << 16);
    T1 = (T1 & 0xffff0000) | (T0 >> 16);