/*
 * Alpha emulation cpu micro-operations helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include "host-utils.h"
#include "softfloat.h"

#include "op_helper.h"
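/*
 * The memory access helpers are generated from a single template:
 * each inclusion of op_helper_mem.h consumes the current MEMSUFFIX
 * definition (the template is expected to #undef it) and emits one
 * family of load/store helpers per CPU access mode below.
 */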
#define MEMSUFFIX _raw
#include "op_helper_mem.h"
#if !defined(CONFIG_USER_ONLY)
#define MEMSUFFIX _kernel
#include "op_helper_mem.h"

#define MEMSUFFIX _executive
#include "op_helper_mem.h"

#define MEMSUFFIX _supervisor
#include "op_helper_mem.h"

#define MEMSUFFIX _user
#include "op_helper_mem.h"

/* This is used for PAL modes */
#define MEMSUFFIX _data
#include "op_helper_mem.h"
#endif
void helper_tb_flush (void)
{
    tb_flush(env);
}

void cpu_dump_EA (target_ulong EA);
void helper_print_mem_EA (target_ulong EA)
{
    cpu_dump_EA(EA);
}
/*****************************************************************************/
/* Exception processing helpers */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}
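
/*
 * AMASK returns its operand with the bits corresponding to implemented
 * architecture extensions (BWX, FIX, MVI, ...) cleared.  The pre-EV6
 * implementations handled by the first case report no extensions, so
 * the operand is returned unchanged for them.
 */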
uint64_t helper_amask (uint64_t arg)
{
    switch (env->implver) {
    case IMPLVER_2106x:
        /* EV4, EV45, LCA, LCA45 & EV5 */
        break;
    case IMPLVER_21164:
    case IMPLVER_21264:
    case IMPLVER_21364:
        arg &= ~env->amask;
        break;
    }
    return arg;
}
uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}

uint64_t helper_load_implver (void)
{
    return env->implver;
}
void helper_load_fpcr (void)
{
    T0 = 0;
#ifdef CONFIG_SOFTFLOAT
    T0 |= env->fp_status.float_exception_flags << 52;
    if (env->fp_status.float_exception_flags)
        T0 |= 1ULL << 63;
    env->ipr[IPR_EXC_SUM] &= ~0x3E;
    env->ipr[IPR_EXC_SUM] |= env->fp_status.float_exception_flags << 1;
#endif
    switch (env->fp_status.float_rounding_mode) {
    case float_round_nearest_even:
        T0 |= 2ULL << 58;
        break;
    case float_round_down:
        T0 |= 1ULL << 58;
        break;
    case float_round_up:
        T0 |= 3ULL << 58;
        break;
    case float_round_to_zero:
        break;
    }
}
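
/*
 * FPCR<59:58> holds the dynamic rounding mode: 0 = chopped (toward
 * zero), 1 = minus infinity, 2 = normal (nearest even), 3 = plus
 * infinity.  The store helper below maps that field onto the softfloat
 * rounding modes in the same order.
 */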
void helper_store_fpcr (void)
{
#ifdef CONFIG_SOFTFLOAT
    set_float_exception_flags((T0 >> 52) & 0x3F, &FP_STATUS);
#endif
    switch ((T0 >> 58) & 3) {
    case 0:
        set_float_rounding_mode(float_round_to_zero, &FP_STATUS);
        break;
    case 1:
        set_float_rounding_mode(float_round_down, &FP_STATUS);
        break;
    case 2:
        set_float_rounding_mode(float_round_nearest_even, &FP_STATUS);
        break;
    case 3:
        set_float_rounding_mode(float_round_up, &FP_STATUS);
        break;
    }
}
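
/*
 * RS and RC read the per-CPU interrupt flag and then set or clear it,
 * returning the previous value.  The spinlock keeps the
 * read-modify-write atomic with respect to other virtual CPUs.
 */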
spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;

uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}
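
/*
 * The trapping arithmetic helpers below detect signed overflow with a
 * bitwise identity: for s = a + b, overflow occurred iff a and b have
 * the same sign and s has the other one, i.e. iff
 * ~(a ^ b) & (a ^ s) has the sign bit set ("a ^ b ^ -1" is just
 * ~(a ^ b)).  Example: 0x7FFFFFFFFFFFFFFF + 1 satisfies the condition
 * and raises EXCP_ARITH_OVERFLOW.
 */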
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;

    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;

    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;

    op1 -= op2;
    /* Subtraction overflows iff the operands have different signs and
       the result's sign differs from the minuend's.  */
    if (unlikely((tmp ^ op2) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;

    op1 = (uint32_t)(op1 - op2);
    if (unlikely((tmp ^ op2) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}
uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}
uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}
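
/*
 * byte_zap clears each byte of op whose corresponding bit is set in
 * mskb; the msk/ins/zap helpers below all reduce to it by building an
 * 8-bit byte mask from the low bits of their second operand.
 * Example: byte_zap(0x1122334455667788ULL, 0x81) clears bytes 7 and 0,
 * giving 0x0022334455667700ULL.
 */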
static always_inline uint64_t byte_zap (uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x01 << (mask & 7));
}

uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x01 << (mask & 7)));
}

uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x03 << (mask & 7));
}

uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x03 << (mask & 7)));
}

uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x0F << (mask & 7));
}

uint64_t helper_insll(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x0F << (mask & 7)));
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0xFF << (mask & 7));
}

uint64_t helper_insql(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0xFF << (mask & 7)));
}
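
/*
 * The *wh/*lh/*qh variants operate on the "high" part of an unaligned
 * access: shifting the byte-mask pattern left by the alignment and
 * then right by 8 keeps exactly the bytes that spill into the next
 * quadword.  When (mask & 7) == 0 nothing spills and the computed
 * byte mask is 0.
 */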
uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}

uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}

uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}

uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}

uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}

uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    val >>= 64 - ((mask & 7) * 8);
    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}
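
/*
 * CMPBGE compares all eight byte lanes in parallel: result bit i is
 * set when byte i of op1 is (unsigned) >= byte i of op2.  It is the
 * workhorse of the string primitives; e.g. cmpbge(x, 0) always yields
 * 0xFF, and cmpbge(0, x) has bit i set exactly where byte i of x is
 * zero.
 */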
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}
void helper_cmov_fir (int freg)
{
    if (FT0 != 0)
        env->fir[freg] = FT1;
}

void helper_sqrts (void)
{
    FT0 = float32_sqrt(FT0, &FP_STATUS);
}
void helper_cpys (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

void helper_cpysn (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = (~p.i) & 0x8000000000000000ULL;
    r.i |= q.i & ~0x8000000000000000ULL;
    FT0 = r.d;
}

void helper_cpyse (void)
{
    union {
        double d;
        uint64_t i;
    } p, q, r;

    p.d = FT0;
    q.d = FT1;
    r.i = p.i & 0xFFF0000000000000ULL;
    r.i |= q.i & ~0xFFF0000000000000ULL;
    FT0 = r.d;
}
void helper_itofs (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float32(p.i, &FP_STATUS);
}

void helper_ftois (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float32_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}

void helper_sqrtt (void)
{
    FT0 = float64_sqrt(FT0, &FP_STATUS);
}

void helper_cmptun (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_is_nan(FT0) || float64_is_nan(FT1))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpteq (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_eq(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmptle (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_le(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmptlt (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = 0;
    if (float64_lt(FT0, FT1, &FP_STATUS))
        p.i = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_itoft (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.d = FT0;
    FT0 = int64_to_float64(p.i, &FP_STATUS);
}

void helper_ftoit (void)
{
    union {
        double d;
        uint64_t i;
    } p;

    p.i = float64_to_int64(FT0, &FP_STATUS);
    FT0 = p.d;
}
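
/*
 * VAX F-float: sign in bit 31, excess-128 exponent in bits 30:23,
 * fraction in bits 22:0 with a hidden 0.1f significand.  The same bit
 * pattern read as an IEEE single is exactly four times the VAX value,
 * so (modulo the exp == 0 special cases: true zero, dirty zero and the
 * sign-bit-set reserved operands rejected below) conversion is a scale
 * by 0.25 one way and by 4.0 the other.
 */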
static always_inline int vaxf_is_valid (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    uint32_t exp, mant;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 0 && ((p.i & 0x80000000) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}
static always_inline float vaxf_to_ieee32 (float ff)
{
    union {
        float f;
        uint32_t i;
    } p;
    int exp;

    p.f = ff;
    exp = (p.i >> 23) & 0xFF;
    if (exp < 3) {
        /* Underflow */
        p.f = 0.0;
    } else {
        p.f *= 0.25;
    }

    return p.f;
}

static always_inline float ieee32_to_vaxf (float fi)
{
    union {
        float f;
        uint32_t i;
    } p;
    int exp, mant;

    p.f = fi;
    exp = (p.i >> 23) & 0xFF;
    mant = p.i & 0x007FFFFF;
    if (exp == 255) {
        /* NaN or infinity */
        p.i = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.i = 1; /* VAX dirty zero */
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            p.i = 1; /* VAX dirty zero */
        } else {
            p.f *= 4.0;
        }
    }

    return p.f;
}
void helper_addf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_subf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_mulf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_divf (void)
{
    float ft0, ft1, ft2;

    if (!vaxf_is_valid(FT0) || !vaxf_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = vaxf_to_ieee32(FT1);
    ft2 = float32_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft2);
}

void helper_sqrtf (void)
{
    float ft0, ft1;

    /* Square root is unary: only the FT0 operand needs validating.  */
    if (!vaxf_is_valid(FT0)) {
        /* XXX: TODO */
    }
    ft0 = vaxf_to_ieee32(FT0);
    ft1 = float32_sqrt(ft0, &FP_STATUS);
    FT0 = ieee32_to_vaxf(ft1);
}

void helper_itoff (void)
{
    /* XXX: TODO */
}
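
/*
 * VAX G-float is the same trick in double precision: excess-1024
 * exponent in bits 62:52, hidden 0.1f significand.  A G-float bit
 * pattern read as an IEEE double is again four times the VAX value,
 * and exp == 0 encodes true zero, dirty zero or a reserved operand.
 */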
static always_inline int vaxg_is_valid (double ff)
{
    union {
        double d;
        uint64_t i;
    } p;
    uint64_t exp, mant;

    p.d = ff;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 0 && ((p.i & 0x8000000000000000ULL) || mant != 0)) {
        /* Reserved operands / Dirty zero */
        return 0;
    }

    return 1;
}
static always_inline double vaxg_to_ieee64 (double fg)
{
    union {
        double d;
        uint64_t i;
    } p;
    int exp;

    p.d = fg;
    exp = (p.i >> 52) & 0x7FF;
    if (exp < 3) {
        /* Underflow */
        p.d = 0.0;
    } else {
        p.d *= 0.25;
    }

    return p.d;
}

static always_inline double ieee64_to_vaxg (double fi)
{
    union {
        double d;
        uint64_t i;
    } p;
    int exp;
    uint64_t mant;

    p.d = fi;
    exp = (p.i >> 52) & 0x7FF;
    mant = p.i & 0x000FFFFFFFFFFFFFULL;
    if (exp == 2047) {
        /* NaN or infinity */
        p.i = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            p.i = 0;
        } else {
            /* Denormalized */
            p.i = 1; /* VAX dirty zero */
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            p.i = 1; /* VAX dirty zero */
        } else {
            p.d *= 4.0;
        }
    }

    return p.d;
}
void helper_addg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_add(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_subg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_sub(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_mulg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_mul(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_divg (void)
{
    double ft0, ft1, ft2;

    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    ft2 = float64_div(ft0, ft1, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft2);
}

void helper_sqrtg (void)
{
    double ft0, ft1;

    /* Square root is unary: only the FT0 operand needs validating.  */
    if (!vaxg_is_valid(FT0)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = float64_sqrt(ft0, &FP_STATUS);
    FT0 = ieee64_to_vaxg(ft1);
}
void helper_cmpgeq (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    p.u = 0;
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    if (float64_eq(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpglt (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    p.u = 0;
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    if (float64_lt(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}

void helper_cmpgle (void)
{
    union {
        double d;
        uint64_t u;
    } p;
    double ft0, ft1;

    p.u = 0;
    if (!vaxg_is_valid(FT0) || !vaxg_is_valid(FT1)) {
        /* XXX: TODO */
    }
    ft0 = vaxg_to_ieee64(FT0);
    ft1 = vaxg_to_ieee64(FT1);
    if (float64_le(ft0, ft1, &FP_STATUS))
        p.u = 0x4000000000000000ULL;
    FT0 = p.d;
}
void helper_cvtqs (void)

void helper_cvttq (void)

void helper_cvtqt (void)
void helper_cvtqf (void)
    FT0 = ieee32_to_vaxf(p.u);
void helper_cvtgf (void)
{
    double ft0;

    ft0 = vaxg_to_ieee64(FT0);
    FT0 = ieee32_to_vaxf(ft0);
}
void helper_cvtgd (void)
{
    /* XXX: TODO */
}
void helper_cvtgq (void)
    p.u = vaxg_to_ieee64(FT0);
void helper_cvtqg (void)
    FT0 = ieee64_to_vaxg(p.u);
void helper_cvtdg (void)
{
    /* XXX: TODO */
}
void helper_cvtlq (void)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = (p.u >> 29) & 0x3FFFFFFF;
    q.u |= ((p.u >> 32) & 0xC0000000);
    q.u = (int64_t)((int32_t)q.u);
    FT0 = q.d;
}
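
/*
 * A longword held in a floating-point register is kept in a scattered
 * "register format", matching what LDL/STL produce: the two top memory
 * bits sit in register bits 63:62 and the low bits around bit 29.
 * CVTLQ gathers the fields back into a sign-extended quadword;
 * __helper_cvtql below performs the reverse scatter, with v requesting
 * an arithmetic trap when the quadword does not fit in a longword and
 * s selecting the software-completion (/S) qualifier.
 */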
static always_inline void __helper_cvtql (int s, int v)
{
    union {
        double d;
        uint64_t u;
    } p, q;

    p.d = FT0;
    q.u = ((uint64_t)(p.u & 0xC0000000)) << 32;
    q.u |= ((uint64_t)(p.u & 0x7FFFFFFF)) << 29;
    if (v && (int64_t)((int32_t)p.u) != (int64_t)p.u) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    FT0 = q.d;
}

void helper_cvtql (void)
{
    __helper_cvtql(0, 0);
}

void helper_cvtqlv (void)
{
    __helper_cvtql(0, 1);
}

void helper_cvtqlsv (void)
{
    __helper_cvtql(1, 1);
}
void helper_cmpfeq (void)
    if (float64_eq(FT0, FT1, &FP_STATUS))

void helper_cmpfne (void)
    if (float64_eq(FT0, FT1, &FP_STATUS))

void helper_cmpflt (void)
    if (float64_lt(FT0, FT1, &FP_STATUS))

void helper_cmpfle (void)
    if (float64_le(FT0, FT1, &FP_STATUS))

void helper_cmpfgt (void)
    if (float64_le(FT0, FT1, &FP_STATUS))

void helper_cmpfge (void)
    if (float64_lt(FT0, FT1, &FP_STATUS))
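
/*
 * The cmpf* helpers evaluate VAX F compares with the IEEE double
 * primitives; cmpfne, cmpfgt and cmpfge reuse the eq/le/lt tests and
 * must invert the sense of the result they store.  Note that cmpfle
 * queries float64_le, not float64_lt, since "less than" alone cannot
 * express "less or equal" with the operands in this order.
 */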
#if !defined (CONFIG_USER_ONLY)
void helper_mfpr (int iprn)
{
    uint64_t val;

    if (cpu_alpha_mfpr(env, iprn, &val) == 0)
        T0 = val;
}

void helper_mtpr (int iprn)
{
    cpu_alpha_mtpr(env, iprn, T0, NULL);
}
#endif
#if defined(HOST_SPARC) || defined(HOST_SPARC64)
void helper_reset_FT0 (void)
{
    FT0 = 0;
}

void helper_reset_FT1 (void)
{
    FT1 = 0;
}

void helper_reset_FT2 (void)
{
    FT2 = 0;
}
#endif
/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 *      Once we correctly emulate the PALcode, we should never see
 *      HW_LD / HW_ST instructions.
 */
void helper_ld_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
        tlb_fill(T0, 0, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
void helper_st_phys_to_virt (void)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (T0 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((T0 & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = T0 + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
        tlb_fill(T0, 1, mmu_idx, retaddr);
        goto redo;
    }
    T0 = physaddr;
}
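
/*
 * Both helpers above open-code the softmmu fast path: hash the virtual
 * page number into the direct-mapped TLB, compare the tag for the
 * wanted access kind (addr_read/addr_write), and on a hit turn the
 * virtual address into a host pointer by adding the per-entry addend;
 * on a miss, tlb_fill refills the entry and the lookup is retried.
 */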
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* Try to fill the TLB and return an exception if error.  If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}
#endif