/*
 *  PowerPC emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "dyngen-exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif
/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err(uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit(env);
}

void helper_raise_exception(uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
/*****************************************************************************/
/* Registers load and stores */

void helper_load_dump_spr(uint32_t sprn)
{
    qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

void helper_store_dump_spr(uint32_t sprn)
{
    qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
             env->spr[sprn]);
}

target_ulong helper_load_tbl(void)
{
    return (target_ulong)cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu(void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl(void)
{
    return (target_ulong)cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu(void)
{
    return cpu_ppc_load_atbu(env);
}
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
target_ulong helper_load_purr(void)
{
    return (target_ulong)cpu_ppc_load_purr(env);
}
#endif

target_ulong helper_load_601_rtcl(void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu(void)
{
    return cpu_ppc601_load_rtcu(env);
}
#if !defined(CONFIG_USER_ONLY)
#if defined(TARGET_PPC64)
void helper_store_asr(target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1(target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl(target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu(target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl(target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu(target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl(target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu(target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

target_ulong helper_load_decr(void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr(target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
void helper_store_hid0_601(target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
                 val & 0x8 ? 'l' : 'b', env->hflags);
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}
void helper_store_403_pbr(uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}
target_ulong helper_load_40x_pit(void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit(target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0(target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler(target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr(target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr(target_ulong val)
{
    store_booke_tsr(env, val);
}

void helper_store_ibatu(uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl(uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu(uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl(uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

void helper_store_601_batl(uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu(uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif
/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}
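/* Example (illustrative, not from the original source): with TARGET_PPC64
 * and MSR[SF] clear, effective addresses wrap modulo 2^32, so
 * addr_add(0xFFFFFFFCULL, 8) returns 0x00000004 rather than 0x100000004;
 * with MSR[SF] set the full 64-bit sum is kept. */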
void helper_lmw(target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le) {
            env->gpr[reg] = bswap32(ldl(addr));
        } else {
            env->gpr[reg] = ldl(addr);
        }
        addr = addr_add(addr, 4);
    }
}

void helper_stmw(target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (msr_le) {
            stl(addr, bswap32((uint32_t)env->gpr[reg]));
        } else {
            stl(addr, (uint32_t)env->gpr[reg]);
        }
        addr = addr_add(addr, 4);
    }
}
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        env->gpr[reg] = ldl(addr);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        env->gpr[reg] = 0;
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            env->gpr[reg] |= ldub(addr) << sh;
            addr = addr_add(addr, 1);
        }
    }
}
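/* Example (illustrative): "lswi rD,rA,6" reaches this helper with nb = 6.
 * The first loop loads one full word into gpr[reg]; the tail loop then
 * packs the remaining two bytes into bits 31:16 of the next register,
 * leaving its low-order bytes zeroed as the architecture requires. */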
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
    int sh;

    for (; nb > 3; nb -= 4) {
        stl(addr, env->gpr[reg]);
        reg = (reg + 1) % 32;
        addr = addr_add(addr, 4);
    }
    if (unlikely(nb > 0)) {
        for (sh = 24; nb > 0; nb--, sh -= 8) {
            stb(addr, (env->gpr[reg] >> sh) & 0xFF);
            addr = addr_add(addr, 1);
        }
    }
}
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
    int i;

    addr &= ~(dcache_line_size - 1);
    for (i = 0; i < dcache_line_size; i += 4) {
        stl(addr + i, 0);
    }
    if (env->reserve_addr == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }
}

void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        do_dcbz(addr, 32);
    } else {
        do_dcbz(addr, env->dcache_line_size);
    }
}
void helper_icbi(target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    ldl(addr);
}
/* XXX: to be tested */
target_ulong helper_lscbx(target_ulong addr, uint32_t reg, uint32_t ra,
                          uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = ldub(addr);
        addr = addr_add(addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
uint64_t helper_mulhd(uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
uint64_t helper_mulhdu(uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

uint64_t helper_mulldo(uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
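/* Note on the overflow test above (added for clarity): the signed 128-bit
 * product fits in 64 bits only when the high word th is a plain sign word,
 * i.e. 0 or -1, and (uint64_t)(th + 1) <= 1 checks both values with one
 * unsigned comparison.  The test does not also inspect the sign bit of tl,
 * so products in [2^63, 2^64) escape detection. */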
target_ulong helper_cntlzw(target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd(target_ulong t)
{
    return clz64(t);
}
#endif
/* shift right arithmetic helper */
target_ulong helper_sraw(target_ulong value, target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int32_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int32_t)value >> 31;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return (target_long)ret;
}

#if defined(TARGET_PPC64)
target_ulong helper_srad(target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* The mask must be built in 64-bit arithmetic: a 32-bit
             * "1 << shift" would be undefined for shift counts >= 32. */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
#if defined(TARGET_PPC64)
target_ulong helper_popcntb(target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    return val;
}

target_ulong helper_popcntw(target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >>  8) &
                                           0x00ff00ff00ff00ffULL);
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
                                           0x0000ffff0000ffffULL);
    return val;
}

target_ulong helper_popcntd(target_ulong val)
{
    return ctpop64(val);
}
#else
target_ulong helper_popcntb(target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    return val;
}

target_ulong helper_popcntw(target_ulong val)
{
    val = (val & 0x55555555) + ((val >>  1) & 0x55555555);
    val = (val & 0x33333333) + ((val >>  2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >>  4) & 0x0f0f0f0f);
    val = (val & 0x00ff00ff) + ((val >>  8) & 0x00ff00ff);
    val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
    return val;
}
#endif
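/* Worked example (illustrative) of the bit-parallel folding above, on the
 * byte 0b01101110: after the first step each 2-bit field holds the count
 * of its pair (01 01 10 01), after the second each nibble holds its count
 * (0010 0011), and after the third the byte holds its total (00000101 = 5).
 * helper_popcntb stops at the byte stage because the popcntb instruction
 * reports one count per byte. */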
/*****************************************************************************/
/* Floating point operations helpers */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;

    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
static inline int isden(float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
uint32_t helper_compute_fprf(uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;

    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_any_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg) {
            ret = 0x09;
        } else {
            ret = 0x05;
        }
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg) {
                ret = 0x12;
            } else {
                ret = 0x02;
            }
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
/* Floating-point invalid operations exception */
static inline uint64_t fload_invalid_op_excp(int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0x7FF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | op);
        }
    }
    return ret;
}
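/* Behavioural summary (added for clarity): with FPSCR[VE] clear the helper
 * returns the default quiet NaN (0x7FF8000000000000) for the arithmetic
 * cases so the caller can store it into the target FPR.  With VE set it
 * raises the program exception at once when MSR[FE0] or MSR[FE1] is set;
 * for VXVC it instead records the exception in env->exception_index so
 * that helper_float_check_status() can deliver it after the FPR update. */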
static inline void float_zero_divide_excp(void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}
static inline void float_overflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}
static inline void float_underflow_excp(void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}
static inline void float_inexact_excp(void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
static inline void fpscr_set_rounding_mode(void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
void helper_fpscr_clrbit(uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
void helper_fpscr_setbit(uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr |= 1 << bit;
    if (prev == 0) {
        switch (bit) {
        case FPSCR_VX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve) {
                goto raise_ve;
            }
            break;
        case FPSCR_OX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_oe) {
                goto raise_oe;
            }
            break;
        case FPSCR_UX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ue) {
                goto raise_ue;
            }
            break;
        case FPSCR_ZX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ze) {
                goto raise_ze;
            }
            break;
        case FPSCR_XX:
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_xe) {
                goto raise_xe;
            }
            break;
        case FPSCR_VXSNAN:
        case FPSCR_VXISI:
        case FPSCR_VXIDI:
        case FPSCR_VXZDZ:
        case FPSCR_VXIMZ:
        case FPSCR_VXVC:
        case FPSCR_VXSOFT:
        case FPSCR_VXSQRT:
        case FPSCR_VXCVI:
            env->fpscr |= 1 << FPSCR_VX;
            env->fpscr |= 1 << FPSCR_FX;
            if (fpscr_ve != 0) {
                goto raise_ve;
            }
            break;
        case FPSCR_VE:
            if (fpscr_vx != 0) {
            raise_ve:
                env->error_code = POWERPC_EXCP_FP;
                if (fpscr_vxsnan) {
                    env->error_code |= POWERPC_EXCP_FP_VXSNAN;
                }
                if (fpscr_vxisi) {
                    env->error_code |= POWERPC_EXCP_FP_VXISI;
                }
                if (fpscr_vxidi) {
                    env->error_code |= POWERPC_EXCP_FP_VXIDI;
                }
                if (fpscr_vxzdz) {
                    env->error_code |= POWERPC_EXCP_FP_VXZDZ;
                }
                if (fpscr_vximz) {
                    env->error_code |= POWERPC_EXCP_FP_VXIMZ;
                }
                if (fpscr_vxvc) {
                    env->error_code |= POWERPC_EXCP_FP_VXVC;
                }
                if (fpscr_vxsoft) {
                    env->error_code |= POWERPC_EXCP_FP_VXSOFT;
                }
                if (fpscr_vxsqrt) {
                    env->error_code |= POWERPC_EXCP_FP_VXSQRT;
                }
                if (fpscr_vxcvi) {
                    env->error_code |= POWERPC_EXCP_FP_VXCVI;
                }
                goto raise_excp;
            }
            break;
        case FPSCR_OE:
            if (fpscr_ox != 0) {
            raise_oe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
                goto raise_excp;
            }
            break;
        case FPSCR_UE:
            if (fpscr_ux != 0) {
            raise_ue:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
                goto raise_excp;
            }
            break;
        case FPSCR_ZE:
            if (fpscr_zx != 0) {
            raise_ze:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
                goto raise_excp;
            }
            break;
        case FPSCR_XE:
            if (fpscr_xx != 0) {
            raise_xe:
                env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
                goto raise_excp;
            }
            break;
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        raise_excp:
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* We have to update Rc1 before raising the exception */
            env->exception_index = POWERPC_EXCP_PROGRAM;
            break;
        }
    }
}
void helper_store_fpscr(uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode();
}
void helper_float_check_status(void)
{
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(env->exception_index, env->error_code);
        }
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
}
void helper_reset_fpstatus(void)
{
    set_float_exception_flags(0, &env->fp_status);
}
/* fadd - fadd. */
uint64_t helper_fadd(uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN addition */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fsub - fsub. */
uint64_t helper_fsub(uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN subtraction */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fmul - fmul. */
uint64_t helper_fmul(uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN multiplication */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}

/* fdiv - fdiv. */
uint64_t helper_fdiv(uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_infinity(farg1.d) &&
                 float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN division */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }

    return farg1.ll;
}
/* fabs */
uint64_t helper_fabs(uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
uint64_t helper_fnabs(uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
uint64_t helper_fneg(uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}
/* fctiw - fctiw. */
uint64_t helper_fctiw(uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *      To make tests easier, return the same as a real
         *      PowerPC 750 would.
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
uint64_t helper_fctiwz(uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
        /* XXX: higher bits are not supposed to be significant.
         *      To make tests easier, return the same as a real
         *      PowerPC 750 would.
         */
        farg.ll |= 0xFFF80000ULL << 32;
    }
    return farg.ll;
}
#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
uint64_t helper_fcfid(uint64_t arg)
{
    CPU_DoubleU farg;

    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
uint64_t helper_fctid(uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
uint64_t helper_fctidz(uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}
#endif
static inline uint64_t do_fri(uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                        POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_quiet_nan(farg.d) ||
                        float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}

uint64_t helper_frin(uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

uint64_t helper_friz(uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

uint64_t helper_frip(uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

uint64_t helper_frim(uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
/* fmadd - fmadd. */
uint64_t helper_fmadd(uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }

    return farg1.ll;
}
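/* Why float128 (explanatory note): fmadd must behave as if the multiply
 * and add were performed exactly and rounded once.  A product of two
 * float64 values has at most 106 significand bits, which fits losslessly
 * in float128's 113-bit significand, so doing the multiply and add in
 * float128 and rounding only at the final float128_to_float64 yields the
 * single-rounding semantics the architecture requires. */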
/* fmsub - fmsub. */
uint64_t helper_fmsub(uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
    }
    return farg1.ll;
}
/* fnmadd - fnmadd. */
uint64_t helper_fnmadd(uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
/* fnmsub - fnmsub. */
uint64_t helper_fnmsub(uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                 (float64_is_zero(farg1.d) &&
                  float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d) ||
                     float64_is_signaling_nan(farg3.d))) {
            /* sNaN operation */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) &&
                     float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
        if (likely(!float64_is_any_nan(farg1.d))) {
            farg1.d = float64_chs(farg1.d);
        }
    }
    return farg1.ll;
}
/* frsp - frsp. */
uint64_t helper_frsp(uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* fsqrt - fsqrt. */
uint64_t helper_fsqrt(uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
/* fre - fre. */
uint64_t helper_fre(uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    return farg.ll;
}

/* fres - fres. */
uint64_t helper_fres(uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
    farg.d = float64_div(float64_one, farg.d, &env->fp_status);
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);

    return farg.ll;
}
/* frsqrte - frsqrte. */
uint64_t helper_frsqrte(uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;

    farg.ll = arg;

    if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        if (unlikely(float64_is_signaling_nan(farg.d))) {
            /* sNaN reciprocal square root */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        }
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(float64_one, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
/* fsel - fsel. */
uint64_t helper_fsel(uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1;

    farg1.ll = arg1;

    if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
        !float64_is_any_nan(farg1.d)) {
        return arg2;
    } else {
        return arg3;
    }
}
void helper_fcmpu(uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
void helper_fcmpo(uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;

    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_any_nan(farg1.d) ||
                 float64_is_any_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
#if !defined(CONFIG_USER_ONLY)
void helper_store_msr(target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
static inline void do_rfi(target_ulong nip, target_ulong msr,
                          target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh) {
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
        }
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined(DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
void helper_rfi(void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 1);
}

#if defined(TARGET_PPC64)
void helper_rfid(void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0x783F0000), 0);
}

void helper_hrfid(void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0x783F0000), 0);
}
#endif
#endif
void helper_tw(target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
                  ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
                  ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
                  ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
                  ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}

#if defined(TARGET_PPC64)
void helper_td(target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
                  ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
                  ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
                  ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
                  ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
#endif
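/* Example (illustrative): the five TO bits test, from most to least
 * significant, signed "less than", signed "greater than", "equal",
 * unsigned "less than" and unsigned "greater than".  So
 * helper_tw(r1, r2, 0x04) traps only when the operands compare equal
 * (the "tweq" form), and flags 0x1f traps unconditionally since the five
 * predicates are exhaustive. */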
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_clcs(uint32_t arg)
{
    switch (arg) {
    case 0x0CUL:
        /* Instruction cache line size */
        return env->icache_line_size;
    case 0x0DUL:
        /* Data cache line size */
        return env->dcache_line_size;
    case 0x0EUL:
        /* Minimum cache line size */
        return (env->icache_line_size < env->dcache_line_size) ?
            env->icache_line_size : env->dcache_line_size;
    case 0x0FUL:
        /* Maximum cache line size */
        return (env->icache_line_size > env->dcache_line_size) ?
            env->icache_line_size : env->dcache_line_size;
    default:
        /* Undefined */
        return 0;
    }
}
target_ulong helper_div(target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}

target_ulong helper_divo(target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}

target_ulong helper_divs(target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso(target_ulong arg1, target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->xer &= ~(1 << XER_OV);
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
#if !defined(CONFIG_USER_ONLY)
target_ulong helper_rac(target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = nb_BATs;
    return ret;
}

void helper_rfsvc(void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
#endif
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/* Extremely decomposed:
 * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
 */
#if !defined(CONFIG_USER_ONLY)
target_ulong helper_602_mfrom(target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
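/* Sketch (illustrative; "gen_entry" is hypothetical, not the generator
 * shipped with QEMU): the 602 entries of mfrom_ROM_table follow the
 * formula in the comment above, so a host-side generator could compute
 * them along these lines:
 *
 *     #include <math.h>
 *     static uint32_t gen_entry(int n)
 *     {
 *         return (uint32_t)(256.0 * log10(pow(10.0, -n / 256.0) + 1.0)
 *                           + 0.5);
 *     }
 */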
/*****************************************************************************/
/* Embedded PowerPC specific helpers */

/* XXX: to be improved to check access rights when in user-mode */
target_ulong helper_load_dcr(target_ulong dcrn)
{
    uint32_t val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env,
                                     (uint32_t)dcrn, &val) != 0)) {
        qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}

void helper_store_dcr(target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        qemu_log("No DCR environment\n");
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn,
                                      (uint32_t)val) != 0)) {
        qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
#if !defined(CONFIG_USER_ONLY)
void helper_40x_rfci(void)
{
    do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
           ~((target_ulong)0xFFFF0000), 0);
}

void helper_rfci(void)
{
    do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfdi(void)
{
    do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}

void helper_rfmci(void)
{
    do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
           ~((target_ulong)0x3FFF0000), 0);
}
#endif
/* 440 specific */
target_ulong helper_dlmzb(target_ulong high, target_ulong low,
                          uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif

/* If X is a NaN, store the corresponding QNaN into RESULT.  Otherwise,
 * execute the following block.  */
#define DO_HANDLE_NAN(result, x)                        \
    if (float32_is_any_nan(x)) {                        \
        CPU_FloatU __f;                                 \
        __f.f = x;                                      \
        __f.l = __f.l | (1 << 22);  /* Set QNaN bit. */ \
        result = __f.f;                                 \
    } else

#define HANDLE_NAN1(result, x)                  \
    DO_HANDLE_NAN(result, x)
#define HANDLE_NAN2(result, x, y)               \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
#define HANDLE_NAN3(result, x, y, z)            \
    DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
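/* How the macro composes (explanatory note): DO_HANDLE_NAN ends in a bare
 * "else", so a use such as
 *
 *     HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {
 *         r->f[i] = ...;
 *     }
 *
 * expands to "if (nan(a)) {...} else if (nan(b)) {...} else { r->f[i] = ...; }":
 * the user block runs only when none of the inspected inputs is a NaN, and
 * the first NaN operand (quietened) wins otherwise. */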
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max)          \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x < (from_type)min) {                               \
            r = min;                                            \
            *sat = 1;                                           \
        } else if (x > (from_type)max) {                        \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)         \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x > (from_type)max) {                               \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
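/* Example expansion (illustrative): SATCVT(sh, sb, int16_t, int8_t,
 * INT8_MIN, INT8_MAX) defines
 *
 *     static inline int8_t cvtshsb(int16_t x, int *sat);
 *
 * which clamps x to [-128, 127] and sets *sat when clamping occurred; the
 * VARITHSAT and VPK users below fold *sat into VSCR[SAT]. */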
#define LVE(name, access, swap, element)                        \
    void helper_##name(ppc_avr_t *r, target_ulong addr)         \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX*(n_elems - 1);                      \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
                                                                \
        if (msr_le) {                                           \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(addr));                             \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(addr);                                   \
        }                                                       \
    }
#define I(x, y) (x)
LVE(lvebx, ldub, I, u8)
LVE(lvehx, lduw, bswap16, u16)
LVE(lvewx, ldl, bswap32, u32)
#undef I
#undef LVE
void helper_lvsl(ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr(ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = j++;
    }
}
#define STVE(name, access, swap, element)                               \
    void helper_##name(ppc_avr_t *r, target_ulong addr)                 \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
                                                                        \
        if (msr_le) {                                                   \
            access(addr, swap(r->element[LO_IDX ? index :               \
                                         (adjust - index)]));           \
        } else {                                                        \
            access(addr, r->element[LO_IDX ? index :                    \
                                    (adjust - index)]);                 \
        }                                                               \
    }
#define I(x, y) (x)
STVE(stvebx, stb, I, u8)
STVE(stvehx, stw, bswap16, u16)
STVE(stvewx, stl, bswap32, u32)
#undef I
#undef STVE
void helper_mtvscr(ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
#define VARITH_DO(name, op, element)                                    \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)     \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                r->f[i] = func(a->f[i], b->f[i], &env->vec_status);     \
            }                                                           \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
#undef VARITHFP
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            case 2:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            case 4:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element,       \
             unsigned_type)                                             \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
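/* Note on the widened arithmetic (added for clarity): the sum is formed in
 * etype, twice the element width, so a->element[i] + b->element[i] + 1
 * cannot wrap; the trailing "+ 1" makes the subsequent >> 1 round the
 * average up on odd sums, as the vavg* instructions require. */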
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn(t, -uim, &env->vec_status);        \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ?    \
                               ones : 0x0);                             \
            switch (sizeof(a->element[0])) {                            \
            case 4:                                                     \
                r->u32[i] = result;                                     \
                break;                                                  \
            case 2:                                                     \
                r->u16[i] = result;                                     \
                break;                                                  \
            case 1:                                                     \
                r->u8[i] = result;                                      \
                break;                                                  \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)  \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i],           \
                                            &env->vec_status);          \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)          \
    VCMPFP_DO(suffix, compare, order, 0)        \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
static inline void vcmpbfp_internal(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                                    int record)
{
    int i;
    int all_in = 0;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here. */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;

            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}

void helper_vcmpbfp(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 0);
}

void helper_vcmpbfp_dot(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(r, a, b, 1);
}
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t uim)   \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
                                                                        \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                                                                        \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
void helper_vmaddfp(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end. */
            float64 af, bf, cf, t;

            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_add(t, bf, &env->vec_status);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
void helper_vmhaddshs(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);

        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmhraddshs(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
    VMINMAX_DO(min##suffix, >, element)         \
    VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX

#define VMINMAXFP(suffix, rT, rF)                                       \
    void helper_v##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)     \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {                    \
                if (float32_lt_quiet(a->f[i], b->f[i],                  \
                                     &env->vec_status)) {               \
                    r->f[i] = rT->f[i];                                 \
                } else {                                                \
                    r->f[i] = rF->f[i];                                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
VMINMAXFP(minfp, a, b)
VMINMAXFP(maxfp, b, a)
#undef VMINMAXFP
void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        r->s16[i] = (int16_t) (prod + c->s16[i]);
    }
}
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
                                                                        \
        for (i = 0; i < n_elems / 2; i++) {                             \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i * 2 - (1 + HI_IDX)] =        \
                    b->element[n_elems - i - 1];                        \
                result.element[n_elems - i * 2 - (1 + LO_IDX)] =        \
                    a->element[n_elems - i - 1];                        \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
    VMRG_DO(mrgl##suffix, element, MRGHI)       \
    VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
void helper_vmsummbm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] +
            prod[4 * i + 2] + prod[4 * i + 3];
    }
}

void helper_vmsumshm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}

void helper_vmsumshs(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmsumubm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] +
            prod[4 * i + 2] + prod[4 * i + 3];
    }
}

void helper_vmsumuhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}

void helper_vmsumuhs(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i * 2 + HI_IDX] *   \
                    b->mul_element[i * 2 + HI_IDX];                     \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i * 2 + LO_IDX] *   \
                    b->mul_element[i * 2 + LO_IDX];                     \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element)         \
    VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
    VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
void helper_vnmsubfp(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
            /* Need to do the computation in higher precision and round
             * once at the end. */
            float64 af, bf, cf, t;

            af = float32_to_float64(a->f[i], &env->vec_status);
            bf = float32_to_float64(b->f[i], &env->vec_status);
            cf = float32_to_float64(c->f[i], &env->vec_status);
            t = float64_mul(af, cf, &env->vec_status);
            t = float64_sub(t, bf, &env->vec_status);
            t = float64_chs(t);
            r->f[i] = float64_to_float32(t, &env->vec_status);
        }
    }
}
void helper_vperm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I(i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif

        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
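/* Put differently, each control byte of c picks one byte out of the
 * 32-byte concatenation of a and b: bit 4 selects the source register
 * and the low nibble the byte index (mirrored on little-endian hosts).
 */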
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I(i, u64) {
        VECTOR_FOR_INORDER_I(j, u32) {
            uint32_t e = x[i]->u32[j];
            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
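/* Each 32-bit source pixel holds four 8-bit channels; the packed 16-bit
 * pixel keeps the low-order bit of the first (alpha) byte and the top 5
 * bits of each colour byte, roughly an a:R5:G5:B5 layout, which is what
 * the shift-and-mask expression above extracts.
 */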
#define VPK(suffix, from, to, cvt, dosat)                               \
    void helper_vpk##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
        VECTOR_FOR_INORDER_I(i, from) {                                 \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
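/* The *s variants use saturating conversions and may set VSCR[SAT]; the
 * *m variants pass the identity macro I as cvt, so elements are simply
 * truncated modulo the narrower type.
 */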
void helper_vrefp(ppc_avr_t *r, ppc_avr_t *b)
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);

#define VRFI(suffix, rounding)                                          \
    void helper_vrfi##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
        float_status s = env->vec_status;                               \
        set_float_rounding_mode(rounding, &s);                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            HANDLE_NAN1(r->f[i], b->f[i]) {                             \
                r->f[i] = float32_round_to_int(b->f[i], &s);            \
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) |                  \
                (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
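/* The mask expression works out per element width: sizeof values of
 * 1, 2 and 4 bytes give 1 << 3, 1 << 4 and 1 << 5, i.e. masks 0x7, 0xf
 * and 0x1f, so only log2(bits) low-order bits of b select the rotate
 * count.
 */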
void helper_vrsqrtefp(ppc_avr_t *r, ppc_avr_t *b)
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            float32 t = float32_sqrt(b->f[i], &env->vec_status);
            r->f[i] = float32_div(float32_one, t, &env->vec_status);

void helper_vsel(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);

void helper_vexptefp(ppc_avr_t *r, ppc_avr_t *b)
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_exp2(b->f[i], &env->vec_status);

void helper_vlogefp(ppc_avr_t *r, ppc_avr_t *b)
    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN1(r->f[i], b->f[i]) {
            r->f[i] = float32_log2(b->f[i], &env->vec_status);
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check that they are identical,
 * to conform to what real hardware appears to do. */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
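/* e.g. with all shift counts equal to 3, vsl shifts the full 128-bit
 * value left by 3: the top 3 bits of the low doubleword carry into the
 * high doubleword, exactly as computed above.
 */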
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] << shift;                     \

void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index - 0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index - 0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif

void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined(HOST_WORDS_BIGENDIAN)
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16-sh], 0, sh);
#else
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#endif
}
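/* vslo and vsro shift by whole octets: sh is taken from the bits just
 * above the bottom 3 of the last byte of b, and the memmove moves sh
 * whole bytes, zero-filling the vacated end of the register.
 */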
/* Experimental testing shows that hardware masks the immediate. */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element)                                  \
    (ARRAY_SIZE(r->element) - 1 - _SPLAT_MASKED(element))
#endif
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED

#define VSPLTI(suffix, element, splat_type)                             \
    void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat)           \
        splat_type x = (int8_t)(splat << 3) >> 3;                       \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = x;                                          \
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
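/* The (int8_t)(splat << 3) >> 3 trick sign-extends the 5-bit SIMM field:
 * e.g. splat = 0x1f becomes 0xf8 as int8_t (-8), and -8 >> 3 gives -1,
 * so every element is filled with -1 as the instruction requires.
 */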
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = a->element[i] >> shift;                     \

void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX * 0xf] >> 3) & 0xf;

#if defined(HOST_WORDS_BIGENDIAN)
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#else
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16 - sh], 0, sh);
#endif
}
void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }

void helper_vsumsws(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }

void helper_vsum2sws(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper + i * 2];

        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2 * i + j];
        }
        result.s32[upper + i * 2] = cvtsdsw(t, &sat);
    }
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }

void helper_vsum4sbs(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];

        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4 * i + j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }

void helper_vsum4shs(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];

        t += a->s16[2 * i] + a->s16[2 * i + 1];
        r->s32[i] = cvtsdsw(t, &sat);
    }
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }

void helper_vsum4ubs(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];

        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4 * i + j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
#define VUPKPX(suffix, hi)                                              \
    void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {                      \
            uint16_t e = b->u16[hi ? i : i+4];                          \
            uint8_t a = (e >> 15) ? 0xff : 0;                           \
            uint8_t r = (e >> 10) & 0x1f;                               \
            uint8_t g = (e >> 5) & 0x1f;                                \
            uint8_t b = e & 0x1f;                                       \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
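/* This is the inverse of vpkpx: the single alpha bit widens to 0xff or 0,
 * while the 5-bit colour fields are zero-extended into their own bytes
 * (not rescaled to the full 0..255 range).
 */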
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \
                 i++) {                                                 \
                result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef DO_HANDLE_NAN
#undef HANDLE_NAN1
#undef HANDLE_NAN2
#undef HANDLE_NAN3
#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX

/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}
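/* Worked example: byte_reverse(0x35) looks up hbrev[0x3] = 0xC and
 * hbrev[0x5] = 0xA, giving 0xC | (0xA << 4) = 0xAC, i.e. 00110101
 * reversed to 10101100.
 */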
#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */
target_ulong helper_brinc(target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
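/* brinc computes the next index for bit-reversed addressing (as used by
 * FFT butterflies): the low MASKBITS bits of arg1 are incremented in
 * bit-reversed order under the mask supplied in arg2, while the upper
 * bits pass through unchanged.
 */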
uint32_t helper_cntlsw32(uint32_t val)
{
    if (val & 0x80000000) {
        return clz32(~val);
    } else {
        return clz32(val);
    }
}

uint32_t helper_cntlzw32(uint32_t val)
{
    return clz32(val);
}
/* Single-precision floating-point conversions */
static inline uint32_t efscfsi(uint32_t val)
    u.f = int32_to_float32(val, &env->vec_status);

static inline uint32_t efscfui(uint32_t val)
    u.f = uint32_to_float32(val, &env->vec_status);

static inline int32_t efsctsi(uint32_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f))) {
    return float32_to_int32(u.f, &env->vec_status);

static inline uint32_t efsctui(uint32_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f))) {
    return float32_to_uint32(u.f, &env->vec_status);

static inline uint32_t efsctsiz(uint32_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f))) {
    return float32_to_int32_round_to_zero(u.f, &env->vec_status);

static inline uint32_t efsctuiz(uint32_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f))) {
    return float32_to_uint32_round_to_zero(u.f, &env->vec_status);

static inline uint32_t efscfsf(uint32_t val)
    u.f = int32_to_float32(val, &env->vec_status);
    tmp = int64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);

static inline uint32_t efscfuf(uint32_t val)
    u.f = uint32_to_float32(val, &env->vec_status);
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_div(u.f, tmp, &env->vec_status);
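/* The *sf/*uf forms handle the SPE fractional formats: the 32-bit operand
 * is a fixed-point fraction scaled by 2^32, hence the division by
 * 1ULL << 32 here (and the matching multiplication in the to-fractional
 * conversions below).
 */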
static inline uint32_t efsctsf(uint32_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f))) {
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);
    return float32_to_int32(u.f, &env->vec_status);

static inline uint32_t efsctuf(uint32_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float32_is_quiet_nan(u.f))) {
    tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
    u.f = float32_mul(u.f, tmp, &env->vec_status);
    return float32_to_uint32(u.f, &env->vec_status);
#define HELPER_SPE_SINGLE_CONV(name)            \
    uint32_t helper_e##name(uint32_t val)       \
    {                                           \
        return e##name(val);                    \
    }
HELPER_SPE_SINGLE_CONV(fscfsi);
HELPER_SPE_SINGLE_CONV(fscfui);
HELPER_SPE_SINGLE_CONV(fscfuf);
HELPER_SPE_SINGLE_CONV(fscfsf);
HELPER_SPE_SINGLE_CONV(fsctsi);
HELPER_SPE_SINGLE_CONV(fsctui);
HELPER_SPE_SINGLE_CONV(fsctsiz);
HELPER_SPE_SINGLE_CONV(fsctuiz);
HELPER_SPE_SINGLE_CONV(fsctsf);
HELPER_SPE_SINGLE_CONV(fsctuf);

#define HELPER_SPE_VECTOR_CONV(name)                    \
    uint64_t helper_ev##name(uint64_t val)              \
    {                                                   \
        return ((uint64_t)e##name(val >> 32) << 32) |   \
            (uint64_t)e##name(val);                     \
    }
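/* The vector (ev*) forms simply apply the scalar conversion to each
 * 32-bit half of the 64-bit GPR pair, e.g. helper_evfscfsi converts
 * both halves with efscfsi.
 */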
HELPER_SPE_VECTOR_CONV(fscfsi);
HELPER_SPE_VECTOR_CONV(fscfui);
HELPER_SPE_VECTOR_CONV(fscfuf);
HELPER_SPE_VECTOR_CONV(fscfsf);
HELPER_SPE_VECTOR_CONV(fsctsi);
HELPER_SPE_VECTOR_CONV(fsctui);
HELPER_SPE_VECTOR_CONV(fsctsiz);
HELPER_SPE_VECTOR_CONV(fsctuiz);
HELPER_SPE_VECTOR_CONV(fsctsf);
HELPER_SPE_VECTOR_CONV(fsctuf);
/* Single-precision floating-point arithmetic */
static inline uint32_t efsadd(uint32_t op1, uint32_t op2)
    u1.f = float32_add(u1.f, u2.f, &env->vec_status);

static inline uint32_t efssub(uint32_t op1, uint32_t op2)
    u1.f = float32_sub(u1.f, u2.f, &env->vec_status);

static inline uint32_t efsmul(uint32_t op1, uint32_t op2)
    u1.f = float32_mul(u1.f, u2.f, &env->vec_status);

static inline uint32_t efsdiv(uint32_t op1, uint32_t op2)
    u1.f = float32_div(u1.f, u2.f, &env->vec_status);

#define HELPER_SPE_SINGLE_ARITH(name)                   \
    uint32_t helper_e##name(uint32_t op1, uint32_t op2) \
    {                                                   \
        return e##name(op1, op2);                       \
    }
HELPER_SPE_SINGLE_ARITH(fsadd);
HELPER_SPE_SINGLE_ARITH(fssub);
HELPER_SPE_SINGLE_ARITH(fsmul);
HELPER_SPE_SINGLE_ARITH(fsdiv);

#define HELPER_SPE_VECTOR_ARITH(name)                                   \
    uint64_t helper_ev##name(uint64_t op1, uint64_t op2)                \
    {                                                                   \
        return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) |        \
            (uint64_t)e##name(op1, op2);                                \
    }
HELPER_SPE_VECTOR_ARITH(fsadd);
HELPER_SPE_VECTOR_ARITH(fssub);
HELPER_SPE_VECTOR_ARITH(fsmul);
HELPER_SPE_VECTOR_ARITH(fsdiv);
/* Single-precision floating-point comparisons */
static inline uint32_t efscmplt(uint32_t op1, uint32_t op2)
    return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;

static inline uint32_t efscmpgt(uint32_t op1, uint32_t op2)
    return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;

static inline uint32_t efscmpeq(uint32_t op1, uint32_t op2)
    return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;

static inline uint32_t efststlt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmplt(op1, op2);
}

static inline uint32_t efststgt(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpgt(op1, op2);
}

static inline uint32_t efststeq(uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: ignore special values (NaN, infinities, ...) */
    return efscmpeq(op1, op2);
}

#define HELPER_SINGLE_SPE_CMP(name)                     \
    uint32_t helper_e##name(uint32_t op1, uint32_t op2) \
    {                                                   \
        return e##name(op1, op2) << 2;                  \
    }
HELPER_SINGLE_SPE_CMP(fststlt);
HELPER_SINGLE_SPE_CMP(fststgt);
HELPER_SINGLE_SPE_CMP(fststeq);
HELPER_SINGLE_SPE_CMP(fscmplt);
HELPER_SINGLE_SPE_CMP(fscmpgt);
HELPER_SINGLE_SPE_CMP(fscmpeq);
static inline uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
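/* The merged nibble mirrors the CR field layout of the ev* compares:
 * bit 3 holds the high-half result, bit 2 the low-half result, bit 1 is
 * set if either half compared true, and bit 0 if both did.
 */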
#define HELPER_VECTOR_SPE_CMP(name)                                     \
    uint32_t helper_ev##name(uint64_t op1, uint64_t op2)                \
    {                                                                   \
        return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
    }
HELPER_VECTOR_SPE_CMP(fststlt);
HELPER_VECTOR_SPE_CMP(fststgt);
HELPER_VECTOR_SPE_CMP(fststeq);
HELPER_VECTOR_SPE_CMP(fscmplt);
HELPER_VECTOR_SPE_CMP(fscmpgt);
HELPER_VECTOR_SPE_CMP(fscmpeq);
/* Double-precision floating-point conversions */
uint64_t helper_efdcfsi(uint32_t val)
    u.d = int32_to_float64(val, &env->vec_status);

uint64_t helper_efdcfsid(uint64_t val)
    u.d = int64_to_float64(val, &env->vec_status);

uint64_t helper_efdcfui(uint32_t val)
    u.d = uint32_to_float64(val, &env->vec_status);

uint64_t helper_efdcfuid(uint64_t val)
    u.d = uint64_to_float64(val, &env->vec_status);

uint32_t helper_efdctsi(uint64_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_int32(u.d, &env->vec_status);

uint32_t helper_efdctui(uint64_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_uint32(u.d, &env->vec_status);

uint32_t helper_efdctsiz(uint64_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_int32_round_to_zero(u.d, &env->vec_status);

uint64_t helper_efdctsidz(uint64_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_int64_round_to_zero(u.d, &env->vec_status);

uint32_t helper_efdctuiz(uint64_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_uint32_round_to_zero(u.d, &env->vec_status);

uint64_t helper_efdctuidz(uint64_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
    return float64_to_uint64_round_to_zero(u.d, &env->vec_status);

uint64_t helper_efdcfsf(uint32_t val)
    u.d = int32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

uint64_t helper_efdcfuf(uint32_t val)
    u.d = uint32_to_float64(val, &env->vec_status);
    tmp = int64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_div(u.d, tmp, &env->vec_status);

uint32_t helper_efdctsf(uint64_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);
    return float64_to_int32(u.d, &env->vec_status);

uint32_t helper_efdctuf(uint64_t val)
    /* NaNs are not treated the way IEEE 754 prescribes */
    if (unlikely(float64_is_any_nan(u.d))) {
    tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
    u.d = float64_mul(u.d, tmp, &env->vec_status);
    return float64_to_uint32(u.d, &env->vec_status);

uint32_t helper_efscfd(uint64_t val)
    u2.f = float64_to_float32(u1.d, &env->vec_status);

uint64_t helper_efdcfs(uint32_t val)
    u2.d = float32_to_float64(u1.f, &env->vec_status);
/* Double-precision floating-point arithmetic */
uint64_t helper_efdadd(uint64_t op1, uint64_t op2)
    u1.d = float64_add(u1.d, u2.d, &env->vec_status);

uint64_t helper_efdsub(uint64_t op1, uint64_t op2)
    u1.d = float64_sub(u1.d, u2.d, &env->vec_status);

uint64_t helper_efdmul(uint64_t op1, uint64_t op2)
    u1.d = float64_mul(u1.d, u2.d, &env->vec_status);

uint64_t helper_efddiv(uint64_t op1, uint64_t op2)
    u1.d = float64_div(u1.d, u2.d, &env->vec_status);
/* Double-precision floating-point comparison helpers */
uint32_t helper_efdtstlt(uint64_t op1, uint64_t op2)
    return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;

uint32_t helper_efdtstgt(uint64_t op1, uint64_t op2)
    return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;

uint32_t helper_efdtsteq(uint64_t op1, uint64_t op2)
    return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;

uint32_t helper_efdcmplt(uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt(uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq(uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinities, ...) */
    return helper_efdtsteq(op1, op2);
}
/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c). */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUPPCState *env1, target_ulong addr, int is_write, int mmu_idx,
              void *retaddr)
{
    TranslationBlock *tb;
    CPUPPCState *saved_env;
    int ret;

    saved_env = env;
    env = env1;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            tb = tb_find_pc(retaddr);
            if (likely(tb)) {
                /* the PC is inside the translated code, so we have a
                   virtual CPU fault */
                cpu_restore_state(tb, env, retaddr);
            }
        }
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
/* Segment registers load and store */
target_ulong helper_load_sr(target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        return ppc_load_sr(env, sr_num);
    }
#endif
    return env->sr[sr_num];
}

void helper_store_sr(target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}

/* SLB management */
#if defined(TARGET_PPC64)
void helper_store_slb(target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid(target_ulong rb)
{
    target_ulong rt;

    if (ppc_load_slb_esid(env, rb, &rt) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid(target_ulong rb)
{
    target_ulong rt;

    if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL);
    }
    return rt;
}

void helper_slbia(void)
{
    ppc_slb_invalidate_all(env);
}

void helper_slbie(target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}
#endif /* defined(TARGET_PPC64) */

/* TLB management */
void helper_tlbia(void)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie(target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb(target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd(target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

void helper_6xx_tlbi(target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}

/* PowerPC 74xx software TLB load instructions helpers */
static void do_74xx_tlb(target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_74xx_tlbd(target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

void helper_74xx_tlbi(target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}
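/* The BookE TSIZE encoding grows by a factor of 4 per step: size 0 is
 * 1KB, 1 is 4KB, 2 is 16KB, and so on up the supported range.
 */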
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    switch (page_size) {
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
    case 0x000400000000ULL:
    case 0x001000000000ULL:
    case 0x004000000000ULL:
    case 0x010000000000ULL:
/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
target_ulong helper_4xx_tlbre_hi(target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}

target_ulong helper_4xx_tlbre_lo(target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
void helper_4xx_tlbwe_hi(target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one.
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "is not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported for now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}

void helper_4xx_tlbwe_lo(target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

    LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

target_ulong helper_4xx_tlbsx(target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
/* PowerPC 440 TLB management */
void helper_440_tlbwe(uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
            do_flush_tlbs = 1;
        }
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
            do_flush_tlbs = 1;
        }
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs) {
            tlb_flush(env, 1);
        }
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
            tlb_flush(env, 1);
        }
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}
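/* A note on the layout, as this code reads it: one nibble of tlb->prot
 * collects the PAGE_* bits shifted left by 4 and the other the unshifted
 * ones, so the two permission sets of TLB word 2 (user vs supervisor,
 * UX/UW/UR and SX/SW/SR) sit side by side in a single int.
 */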
target_ulong helper_440_tlbre(uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx(target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
/* PowerPC BookE 2.06 TLB management */

static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env, "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}

void helper_booke_setpid(uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
    tlb_flush(env, 1);
}
void helper_booke206_tlbwe(void)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        break;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: LRAT setting not supported yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    }

    if (msr_gs) {
        cpu_abort(env, "missing HV implementation\n");
    }
    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
        env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if (!(tlbncfg & TLBnCFG_AVAIL)) {
        /* force !AVAIL TLB entries to correct page size */
        tlb->mas1 &= ~MAS1_TSIZE_MASK;
        /* XXX can be configured in MMUCSR0 */
        tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
    }

    /* XXX needs to change when supporting 64-bit e500 */
    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & 0xffffffff;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env, 1);
    }
}
static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}

void helper_booke206_tlbre(void)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}

void helper_booke206_tlbsx(target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    target_phys_addr_t raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }
            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }
            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
        << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              uint32_t ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}
void helper_booke206_tlbivax(target_ulong address)
{
    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        tlb_flush(env, 1);
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        tlb_flush_page(env, address & MAS2_EPN_MASK);
    }
}

void helper_booke206_tlbilx0(target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}

void helper_booke206_tlbilx1(target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env, 1);
}
void helper_booke206_tlbilx3(target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode in that
     * case */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env, 1);
}

void helper_booke206_tlbflush(uint32_t type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }
    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}
/* Embedded.Processor Control */
static int dbell2irq(target_ulong rb)
{
    int msg = rb & DBELL_TYPE_MASK;
    int irq = -1;

    switch (msg) {
    case DBELL_TYPE_DBELL:
        irq = PPC_INTERRUPT_DOORBELL;
        break;
    case DBELL_TYPE_DBELL_CRIT:
        irq = PPC_INTERRUPT_CDOORBELL;
        break;
    case DBELL_TYPE_G_DBELL:
    case DBELL_TYPE_G_DBELL_CRIT:
    case DBELL_TYPE_G_DBELL_MC:
    default:
        break;
    }

    return irq;
}

void helper_msgclr(target_ulong rb)
{
    int irq = dbell2irq(rb);

    if (irq < 0) {
        return;
    }

    env->pending_interrupts &= ~(1 << irq);
}

void helper_msgsnd(target_ulong rb)
{
    int irq = dbell2irq(rb);
    int pir = rb & DBELL_PIRTAG_MASK;
    CPUPPCState *cenv;

    if (irq < 0) {
        return;
    }

    for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
        if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
            cenv->pending_interrupts |= 1 << irq;
            cpu_interrupt(cenv, CPU_INTERRUPT_HARD);
        }
    }
}
#endif /* !CONFIG_USER_ONLY */